From 8626131ca646d68d03fd2d310ab905cf02ca960f Mon Sep 17 00:00:00 2001
From: Saad Khalid
Date: Tue, 7 Feb 2023 11:45:55 +0000
Subject: [PATCH] updating riscv-isa-sim vendored repo

---
 .../.github/workflows/apt-packages.txt | 2 +
 .../workflows/continuous-integration.yml | 28 +
 vendor/riscv-isa-sim/.gitignore | 7 +
 vendor/riscv-isa-sim/ChangeLog.md | 36 +
 vendor/riscv-isa-sim/LICENSE | 24 +
 vendor/riscv-isa-sim/Makefile.in | 524 ++
 vendor/riscv-isa-sim/README.md | 300 +
 vendor/riscv-isa-sim/VERSION | 1 +
 vendor/riscv-isa-sim/aclocal.m4 | 302 +
 .../arch_test_target/spike/Makefile.include | 25 +
 .../arch_test_target/spike/README.md | 58 +
 .../spike/device/Makefile_common.inc | 34 +
 .../rv32e_unratified/C/Makefile.include | 7 +
 .../rv32e_unratified/E/Makefile.include | 7 +
 .../rv32e_unratified/M/Makefile.include | 7 +
 .../spike/device/rv32i_m/C/Makefile.include | 7 +
 .../spike/device/rv32i_m/F/Makefile.include | 7 +
 .../spike/device/rv32i_m/I/Makefile.include | 7 +
 .../spike/device/rv32i_m/M/Makefile.include | 7 +
 .../device/rv32i_m/Zifencei/Makefile.include | 7 +
 .../device/rv32i_m/privilege/Makefile.include | 7 +
 .../spike/device/rv64i_m/C/Makefile.include | 7 +
 .../spike/device/rv64i_m/D/Makefile.include | 8 +
 .../spike/device/rv64i_m/I/Makefile.include | 7 +
 .../spike/device/rv64i_m/M/Makefile.include | 8 +
 .../device/rv64i_m/Zifencei/Makefile.include | 7 +
 .../device/rv64i_m/privilege/Makefile.include | 7 +
 .../arch_test_target/spike/link.ld | 18 +
 .../arch_test_target/spike/model_test.h | 70 +
 vendor/riscv-isa-sim/ax_append_flag.m4 | 50 +
 vendor/riscv-isa-sim/ax_append_link_flags.m4 | 44 +
 vendor/riscv-isa-sim/ax_boost_asio.m4 | 110 +
 vendor/riscv-isa-sim/ax_boost_base.m4 | 303 +
 vendor/riscv-isa-sim/ax_boost_regex.m4 | 111 +
 vendor/riscv-isa-sim/ax_check_compile_flag.m4 | 53 +
 vendor/riscv-isa-sim/ax_check_link_flag.m4 | 53 +
 vendor/riscv-isa-sim/ax_require_defined.m4 | 37 +
 vendor/riscv-isa-sim/ci-tests/test-spike | 11 +
 vendor/riscv-isa-sim/config.h.in | 142 +
 vendor/riscv-isa-sim/configure | 7714 +++++++++++++++++
 vendor/riscv-isa-sim/configure.ac | 126 +
 vendor/riscv-isa-sim/customext/cflush.cc | 42 +
 vendor/riscv-isa-sim/customext/customext.ac | 0
 .../riscv-isa-sim/customext/customext.mk.in | 11 +
 vendor/riscv-isa-sim/customext/dummy_rocc.cc | 47 +
 .../riscv-isa-sim/customext/dummy_rocc_test.c | 29 +
 vendor/riscv-isa-sim/debug_rom/.gitignore | 5 +
 vendor/riscv-isa-sim/debug_rom/Makefile | 24 +
 vendor/riscv-isa-sim/debug_rom/debug_rom.S | 79 +
 vendor/riscv-isa-sim/debug_rom/debug_rom.h | 13 +
 vendor/riscv-isa-sim/debug_rom/link.ld | 15 +
 vendor/riscv-isa-sim/disasm/disasm.ac | 0
 vendor/riscv-isa-sim/disasm/disasm.cc | 2147 +++
 vendor/riscv-isa-sim/disasm/disasm.mk.in | 5 +
 vendor/riscv-isa-sim/disasm/regnames.cc | 33 +
 vendor/riscv-isa-sim/fdt/fdt.ac | 0
 vendor/riscv-isa-sim/fdt/fdt.c | 291 +
 vendor/riscv-isa-sim/fdt/fdt.h | 66 +
 vendor/riscv-isa-sim/fdt/fdt.mk.in | 17 +
 vendor/riscv-isa-sim/fdt/fdt_addresses.c | 101 +
 vendor/riscv-isa-sim/fdt/fdt_empty_tree.c | 38 +
 vendor/riscv-isa-sim/fdt/fdt_overlay.c | 881 ++
 vendor/riscv-isa-sim/fdt/fdt_ro.c | 898 ++
 vendor/riscv-isa-sim/fdt/fdt_rw.c | 476 +
 vendor/riscv-isa-sim/fdt/fdt_strerror.c | 59 +
 vendor/riscv-isa-sim/fdt/fdt_sw.c | 376 +
 vendor/riscv-isa-sim/fdt/fdt_wip.c | 94 +
 vendor/riscv-isa-sim/fdt/libfdt.h | 2077 +++
 vendor/riscv-isa-sim/fdt/libfdt_env.h | 95 +
 vendor/riscv-isa-sim/fdt/libfdt_internal.h | 51 +
 vendor/riscv-isa-sim/fesvr/byteorder.h | 94 +
 vendor/riscv-isa-sim/fesvr/context.cc | 115 +
 vendor/riscv-isa-sim/fesvr/context.h | 54 +
 vendor/riscv-isa-sim/fesvr/debug_defines.h | 1418 +++
 vendor/riscv-isa-sim/fesvr/device.cc | 154 +
 vendor/riscv-isa-sim/fesvr/device.h | 118 +
 vendor/riscv-isa-sim/fesvr/dtm.cc | 644 ++
 vendor/riscv-isa-sim/fesvr/dtm.h | 115 +
 vendor/riscv-isa-sim/fesvr/dummy.cc | 4 +
 vendor/riscv-isa-sim/fesvr/elf.h | 134 +
 vendor/riscv-isa-sim/fesvr/elf2hex.cc | 47 +
 vendor/riscv-isa-sim/fesvr/elfloader.cc | 117 +
 vendor/riscv-isa-sim/fesvr/elfloader.h | 13 +
 vendor/riscv-isa-sim/fesvr/fesvr.ac | 11 +
 vendor/riscv-isa-sim/fesvr/fesvr.mk.in | 41 +
 vendor/riscv-isa-sim/fesvr/fesvr.pc.in | 26 +
 vendor/riscv-isa-sim/fesvr/htif.cc | 415 +
 vendor/riscv-isa-sim/fesvr/htif.h | 156 +
 vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc | 76 +
 vendor/riscv-isa-sim/fesvr/htif_hexwriter.h | 32 +
 vendor/riscv-isa-sim/fesvr/htif_pthread.cc | 66 +
 vendor/riscv-isa-sim/fesvr/htif_pthread.h | 38 +
 vendor/riscv-isa-sim/fesvr/memif.cc | 183 +
 vendor/riscv-isa-sim/fesvr/memif.h | 82 +
 vendor/riscv-isa-sim/fesvr/option_parser.cc | 51 +
 vendor/riscv-isa-sim/fesvr/option_parser.h | 31 +
 vendor/riscv-isa-sim/fesvr/rfb.cc | 230 +
 vendor/riscv-isa-sim/fesvr/rfb.h | 53 +
 vendor/riscv-isa-sim/fesvr/syscall.cc | 502 ++
 vendor/riscv-isa-sim/fesvr/syscall.h | 73 +
 vendor/riscv-isa-sim/fesvr/term.cc | 53 +
 vendor/riscv-isa-sim/fesvr/term.h | 11 +
 vendor/riscv-isa-sim/fesvr/tsi.cc | 115 +
 vendor/riscv-isa-sim/fesvr/tsi.h | 57 +
 vendor/riscv-isa-sim/riscv-disasm.pc.in | 11 +
 vendor/riscv-isa-sim/riscv-fesvr.pc.in | 11 +
 vendor/riscv-isa-sim/riscv/abstract_device.h | 15 +
 vendor/riscv-isa-sim/riscv/arith.h | 216 +
 vendor/riscv-isa-sim/riscv/cachesim.cc | 210 +
 vendor/riscv-isa-sim/riscv/cachesim.h | 135 +
 vendor/riscv-isa-sim/riscv/cfg.h | 88 +
 vendor/riscv-isa-sim/riscv/clint.cc | 89 +
 vendor/riscv-isa-sim/riscv/common.h | 18 +
 vendor/riscv-isa-sim/riscv/csrs.cc | 1297 +++
 vendor/riscv-isa-sim/riscv/csrs.h | 701 ++
 vendor/riscv-isa-sim/riscv/debug_defines.h | 2538 ++++++
 vendor/riscv-isa-sim/riscv/debug_module.cc | 938 ++
 vendor/riscv-isa-sim/riscv/debug_module.h | 193 +
 .../riscv-isa-sim/riscv/debug_rom_defines.h | 23 +
 vendor/riscv-isa-sim/riscv/decode.h | 2996 +++++++
 vendor/riscv-isa-sim/riscv/devices.cc | 139 +
 vendor/riscv-isa-sim/riscv/devices.h | 87 +
 vendor/riscv-isa-sim/riscv/disasm.h | 109 +
 vendor/riscv-isa-sim/riscv/dts.cc | 327 +
 vendor/riscv-isa-sim/riscv/dts.h | 27 +
 vendor/riscv-isa-sim/riscv/encoding.h | 4810 ++++++++++
 vendor/riscv-isa-sim/riscv/entropy_source.h | 119 +
 vendor/riscv-isa-sim/riscv/execute.cc | 358 +
 vendor/riscv-isa-sim/riscv/extension.cc | 23 +
 vendor/riscv-isa-sim/riscv/extension.h | 38 +
 vendor/riscv-isa-sim/riscv/extensions.cc | 46 +
 vendor/riscv-isa-sim/riscv/insn_macros.h | 9 +
 vendor/riscv-isa-sim/riscv/insn_template.cc | 47 +
 vendor/riscv-isa-sim/riscv/insn_template.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/add.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/add16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/add32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/add64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/add8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/add_uw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/addi.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/addiw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/addw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/aes32dsi.h | 16 +
 vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h | 21 +
 vendor/riscv-isa-sim/riscv/insns/aes32esi.h | 16 +
 vendor/riscv-isa-sim/riscv/insns/aes32esmi.h | 21 +
 vendor/riscv-isa-sim/riscv/insns/aes64ds.h | 21 +
 vendor/riscv-isa-sim/riscv/insns/aes64dsm.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/aes64es.h | 21 +
 vendor/riscv-isa-sim/riscv/insns/aes64esm.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/aes64im.h | 16 +
 vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h | 38 +
 vendor/riscv-isa-sim/riscv/insns/aes64ks2.h | 16 +
 vendor/riscv-isa-sim/riscv/insns/aes_common.h | 156 +
 vendor/riscv-isa-sim/riscv/insns/amoadd_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amoadd_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amoand_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amoand_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amomax_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amomax_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amomin_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amomin_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amominu_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amominu_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amoor_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amoor_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amoswap_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amoswap_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/amoxor_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/amoxor_w.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/and.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/andi.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/andn.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/auipc.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/ave.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/bclr.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/bclri.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/bcompress.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/bcompressw.h | 10 +
 .../riscv-isa-sim/riscv/insns/bdecompress.h | 9 +
 .../riscv-isa-sim/riscv/insns/bdecompressw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/beq.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/bext.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/bexti.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/bfp.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/bfpw.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/bge.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/bgeu.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/binv.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/binvi.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/blt.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/bltu.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/bmatflip.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/bmator.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/bmatxor.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/bne.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/bset.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/bseti.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_add.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_addi.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_addw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_and.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_andi.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_beqz.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_bnez.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_ebreak.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_fld.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/c_fldsp.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/c_flw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/c_flwsp.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/c_fsd.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/c_fsw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/c_fswsp.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/c_j.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_jal.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/c_jalr.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/c_jr.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_li.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_lui.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/c_lw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_lwsp.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_mv.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_or.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_slli.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_srai.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_srli.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_sub.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_subw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/c_sw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_swsp.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/c_xor.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/cbo_clean.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/cbo_flush.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/cbo_inval.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/cbo_zero.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/clmul.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/clmulh.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/clmulhw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/clmulr.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/clmulrw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/clmulw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/clo16.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/clo32.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/clo8.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/clrs16.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/clrs32.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/clrs8.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/clz.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/clz16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/clz32.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/clz8.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/clzw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/cmix.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/cmov.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/cmpeq16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/cmpeq8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/cpop.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/cpopw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/cras16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/cras32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/crc32_b.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crc32_d.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/crc32_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crc32_w.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crc32c_b.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crc32c_d.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/crc32c_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crc32c_w.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crsa16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/crsa32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/csrrc.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/csrrci.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/csrrs.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/csrrsi.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/csrrw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/csrrwi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/ctz.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/ctzw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/div.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/divu.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/divuw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/divw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/dret.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/ebreak.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/ecall.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/fadd_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fadd_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fadd_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fadd_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fclass_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fclass_h.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fclass_q.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fclass_s.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fdiv_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fdiv_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fdiv_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fdiv_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fence.h | 0
 vendor/riscv-isa-sim/riscv/insns/fence_i.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/feq_d.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/feq_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/feq_q.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/feq_s.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fld.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fle_d.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fle_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fle_q.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fle_s.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/flh.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/flq.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/flt_d.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/flt_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/flt_q.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/flt_s.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/flw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fmadd_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmadd_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmadd_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmadd_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmax_d.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fmax_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fmax_q.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fmax_s.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fmin_d.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fmin_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fmin_q.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fmin_s.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fmsub_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmsub_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmsub_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmsub_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmul_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmul_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmul_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmul_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsd.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsh.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsl.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fslw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/fsq.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsr.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fsri.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/fsriw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/fsrw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/fsub_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsub_h.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsub_q.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsub_s.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/fsw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/gorc.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/gorci.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/gorciw.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/gorcw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/grev.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/grevi.h | 17 +
 vendor/riscv-isa-sim/riscv/insns/greviw.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/grevw.h | 10 +
 .../riscv-isa-sim/riscv/insns/hfence_gvma.h | 4 +
 .../riscv-isa-sim/riscv/insns/hfence_vvma.h | 4 +
 .../riscv-isa-sim/riscv/insns/hinval_gvma.h | 2 +
 .../riscv-isa-sim/riscv/insns/hinval_vvma.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/hlv_b.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hlv_bu.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hlv_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/hlv_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hlv_hu.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hlv_w.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hlv_wu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hsv_b.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hsv_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/hsv_h.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/hsv_w.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/insb.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/jal.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/jalr.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/kabs16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kabs32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/kabs8.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kabsw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kadd16.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/kadd32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kadd64.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/kadd8.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/kaddh.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/kaddw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/kcras16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kcras32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/kcrsa16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kcrsa32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/kdmabb.h | 17 +
 vendor/riscv-isa-sim/riscv/insns/kdmabb16.h | 18 +
 vendor/riscv-isa-sim/riscv/insns/kdmabt.h | 17 +
 vendor/riscv-isa-sim/riscv/insns/kdmabt16.h | 18 +
 vendor/riscv-isa-sim/riscv/insns/kdmatt.h | 17 +
 vendor/riscv-isa-sim/riscv/insns/kdmatt16.h | 18 +
 vendor/riscv-isa-sim/riscv/insns/kdmbb.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/kdmbb16.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/kdmbt.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/kdmbt16.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/kdmtt.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/kdmtt16.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/khm16.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/khm8.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/khmbb.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/khmbb16.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/khmbt.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/khmbt16.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/khmtt.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/khmtt16.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/khmx16.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/khmx8.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/kmabb.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmabb32.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmabt.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmabt32.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmada.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/kmadrs.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmadrs32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmads.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmads32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmar64.h | 16 +
 vendor/riscv-isa-sim/riscv/insns/kmatt.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmatt32.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmaxda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/kmaxda32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmaxds.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmaxds32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/kmda32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmmac.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmmac_u.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmmawb.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmmawb2.h | 15 +
 vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h | 15 +
 vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmmawt.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmmawt2.h | 15 +
 vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h | 15 +
 vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmmsb.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kmmwb2.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmmwt2.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmsda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/kmsda32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmsr64.h | 26 +
 vendor/riscv-isa-sim/riscv/insns/kmsxda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/kmsxda32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kmxda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/kmxda32.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/ksll16.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ksll32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/ksll8.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/kslli16.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/kslli32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/kslli8.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/kslliw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/ksllw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/kslra16.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/kslra16_u.h | 15 +
 vendor/riscv-isa-sim/riscv/insns/kslra32.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/kslra32_u.h | 16 +
 vendor/riscv-isa-sim/riscv/insns/kslra8.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/kslra8_u.h | 15 +
 vendor/riscv-isa-sim/riscv/insns/kslraw.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/kslraw_u.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/kstas16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kstas32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/kstsa16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kstsa32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/ksub16.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ksub32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/ksub64.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ksub8.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ksubh.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/ksubw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/kwmmul.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/lb.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/lbu.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/ld.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/lh.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/lhu.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/lr_d.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/lr_w.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/lui.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/lw.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/lwu.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/maddr32.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/max.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/maxu.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/min.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/minu.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/mret.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/msubr32.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/mul.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/mulh.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/mulhsu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/mulhu.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/mulr64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/mulsr64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/mulw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/or.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/ori.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/orn.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/pack.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/packh.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/packu.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/packuw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/packw.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/pbsad.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/pbsada.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/pkbb16.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/pkbt16.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/pkbt32.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/pktb16.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/pktb32.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/pktt16.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/radd16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/radd32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/radd64.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/radd8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/raddw.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/rcras16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/rcras32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/rcrsa16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/rcrsa32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/rem.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/remu.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/remuw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/remw.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/rol.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/rolw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/ror.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/rori.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/roriw.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/rorw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/rstas16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/rstas32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/rstsa16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/rstsa32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/rsub16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/rsub32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/rsub64.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/rsub8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/rsubw.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/sb.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sc_d.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/sc_w.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/sclip16.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/sclip32.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/sclip8.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/scmple16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/scmple8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/scmplt16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/scmplt8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sd.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sext_b.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sext_h.h | 2 +
 .../riscv/insns/sfence_inval_ir.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sfence_vma.h | 9 +
 .../riscv/insns/sfence_w_inval.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sh.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sh1add.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sh2add.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sh3add.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sha256sig0.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/sha256sig1.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/sha256sum0.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/sha256sum1.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/sha512sig0.h | 13 +
 .../riscv-isa-sim/riscv/insns/sha512sig0h.h | 9 +
 .../riscv-isa-sim/riscv/insns/sha512sig0l.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/sha512sig1.h | 13 +
 .../riscv-isa-sim/riscv/insns/sha512sig1h.h | 9 +
 .../riscv-isa-sim/riscv/insns/sha512sig1l.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/sha512sum0.h | 13 +
 .../riscv-isa-sim/riscv/insns/sha512sum0r.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/sha512sum1.h | 13 +
 .../riscv-isa-sim/riscv/insns/sha512sum1r.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/shfl.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/shfli.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/shflw.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/sinval_vma.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sll.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sll16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sll32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/sll8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/slli.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/slli16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/slli32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/slli8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/slli_uw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/slliw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sllw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/slo.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sloi.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sloiw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/slow.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/slt.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/slti.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sltiu.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sltu.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sm3p0.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/sm3p1.h | 14 +
 vendor/riscv-isa-sim/riscv/insns/sm4_common.h | 27 +
 vendor/riscv-isa-sim/riscv/insns/sm4ed.h | 22 +
 vendor/riscv-isa-sim/riscv/insns/sm4ks.h | 20 +
 vendor/riscv-isa-sim/riscv/insns/smal.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/smalbb.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smalbt.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smalda.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smaldrs.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/smalds.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/smaltt.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smalxda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smalxds.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smaqa.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smaqa_su.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smar64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smax16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smax32.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smax8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smbb16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smbt16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smbt32.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smdrs.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/smdrs32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/smds.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/smds32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/smin16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smin32.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smin8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smmul.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smmul_u.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smmwb.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smmwb_u.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smmwt.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smmwt_u.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smslda.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smslxda.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/smsr64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smtt16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smtt32.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smul16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smul8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smulx16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smulx8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/smxds.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/smxds32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/sra.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sra16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sra16_u.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/sra32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/sra32_u.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/sra8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sra8_u.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/sra_u.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/srai.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/srai16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srai16_u.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/srai32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/srai32_u.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/srai8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srai8_u.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/srai_u.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/sraiw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sraiw_u.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/sraw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sret.h | 27 +
 vendor/riscv-isa-sim/riscv/insns/srl.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/srl16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srl16_u.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/srl32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/srl32_u.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/srl8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srl8_u.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/srli.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/srli16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srli16_u.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/srli32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/srli32_u.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/srli8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srli8_u.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/srliw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/srlw.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sro.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/sroi.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sroiw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/srow.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/stas16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/stas32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/stsa16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/stsa32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/sub.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sub16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sub32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/sub64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sub8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/subw.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/sunpkd810.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sunpkd820.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sunpkd830.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sunpkd831.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sunpkd832.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/sw.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/uclip16.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/uclip32.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/uclip8.h | 13 +
 vendor/riscv-isa-sim/riscv/insns/ucmple16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/ucmple8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/ucmplt16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/ucmplt8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/ukadd16.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ukadd32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/ukadd64.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ukadd8.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ukaddh.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/ukaddw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/ukcras16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/ukcras32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/ukmar64.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ukmsr64.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ukstas16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/ukstas32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/ukstsa16.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/ukstsa32.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/uksub16.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/uksub32.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/uksub64.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/uksub8.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/uksubh.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/uksubw.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/umaqa.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umar64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umax16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umax32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/umax8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umin16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umin32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/umin8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umsr64.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umul16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umul8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umulx16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/umulx8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/unshfl.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/unshfli.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/unshflw.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/uradd16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/uradd32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/uradd64.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/uradd8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/uraddw.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/urcras16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/urcras32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/urcrsa16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/urcrsa32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/urstas16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/urstas32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/urstsa16.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/urstsa32.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/ursub16.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/ursub32.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/ursub64.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/ursub8.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/ursubw.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vadc_vim.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vadd_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vadd_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vadd_vx.h | 5 +
 .../riscv-isa-sim/riscv/insns/vamoaddei16_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoaddei32_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoaddei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoaddei8_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoandei16_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoandei32_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoandei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoandei8_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamomaxei16_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamomaxei32_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamomaxei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamomaxei8_v.h | 2 +
 .../riscv/insns/vamomaxuei16_v.h | 2 +
 .../riscv/insns/vamomaxuei32_v.h | 2 +
 .../riscv/insns/vamomaxuei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamomaxuei8_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamominei16_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamominei32_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamominei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamominei8_v.h | 2 +
 .../riscv/insns/vamominuei16_v.h | 2 +
 .../riscv/insns/vamominuei32_v.h | 2 +
 .../riscv/insns/vamominuei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamominuei8_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoorei16_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoorei32_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoorei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoorei8_v.h | 2 +
 .../riscv/insns/vamoswapei16_v.h | 2 +
 .../riscv/insns/vamoswapei32_v.h | 2 +
 .../riscv/insns/vamoswapei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoswapei8_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoxorei16_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoxorei32_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoxorei64_v.h | 2 +
 .../riscv-isa-sim/riscv/insns/vamoxorei8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vand_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vand_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vand_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vasub_vv.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vasub_vx.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h | 2 +
 .../riscv-isa-sim/riscv/insns/vcompress_vm.h | 33 +
 vendor/riscv-isa-sim/riscv/insns/vcpop_m.h | 23 +
 vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfclass_v.h | 11 +
 .../riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h | 7 +
 .../riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h | 7 +
 .../riscv/insns/vfcvt_rtz_x_f_v.h | 7 +
 .../riscv/insns/vfcvt_rtz_xu_f_v.h | 7 +
 .../riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h | 7 +
 .../riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h | 7 +
 vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfirst_m.h | 20 +
 vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h | 11 +
 .../riscv-isa-sim/riscv/insns/vfmerge_vfm.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h | 38 +
 vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h | 4 +
 .../riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h | 9 +
 .../riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h | 10 +
 .../riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h | 10 +
 .../riscv/insns/vfncvt_rod_f_f_w.h | 15 +
 .../riscv/insns/vfncvt_rtz_x_f_w.h | 10 +
 .../riscv/insns/vfncvt_rtz_xu_f_w.h | 10 +
 .../riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h | 10 +
 .../riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h | 11 +
 .../riscv-isa-sim/riscv/insns/vfredmax_vs.h | 12 +
 .../riscv-isa-sim/riscv/insns/vfredmin_vs.h | 12 +
 .../riscv-isa-sim/riscv/insns/vfredosum_vs.h | 12 +
 .../riscv-isa-sim/riscv/insns/vfredusum_vs.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h | 11 +
 .../riscv/insns/vfslide1down_vf.h | 36 +
 .../riscv-isa-sim/riscv/insns/vfslide1up_vf.h | 36 +
 vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h | 8 +
 .../riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h | 9 +
 .../riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h | 10 +
 .../riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h | 10 +
 .../riscv/insns/vfwcvt_rtz_x_f_v.h | 10 +
 .../riscv/insns/vfwcvt_rtz_xu_f_v.h | 10 +
 .../riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h | 10 +
 .../riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h | 8 +
 .../riscv-isa-sim/riscv/insns/vfwnmacc_vf.h | 8 +
 .../riscv-isa-sim/riscv/insns/vfwnmacc_vv.h | 8 +
 .../riscv-isa-sim/riscv/insns/vfwnmsac_vf.h | 8 +
 .../riscv-isa-sim/riscv/insns/vfwnmsac_vv.h | 8 +
 .../riscv-isa-sim/riscv/insns/vfwredosum_vs.h | 9 +
 .../riscv-isa-sim/riscv/insns/vfwredusum_vs.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vid_v.h | 31 +
 vendor/riscv-isa-sim/riscv/insns/viota_m.h | 53 +
 vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vlm_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vlse16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vlse32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vlse64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vlse8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h | 3 +
 vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmand_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmax_vv.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vmax_vx.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmin_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vmin_vx.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vminu_vv.h | 9 +
 vendor/riscv-isa-sim/riscv/insns/vminu_vx.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmor_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h | 32 +
 vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsif_m.h | 32 +
 vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmsof_m.h | 30 +
 vendor/riscv-isa-sim/riscv/insns/vmul_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmul_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h | 4 +
 vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h | 27 +
 vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h | 28 +
 vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h | 25 +
 vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h | 25 +
 vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h | 25 +
 vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h | 23 +
 vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h | 22 +
 vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h | 22 +
 vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vor_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vor_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vor_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vredand_vs.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h | 5 +
 .../riscv-isa-sim/riscv/insns/vredmaxu_vs.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h | 5 +
 .../riscv-isa-sim/riscv/insns/vredminu_vs.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vredor_vs.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vrem_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vrem_vx.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vremu_vv.h | 8 +
 vendor/riscv-isa-sim/riscv/insns/vremu_vx.h | 8 +
 .../riscv-isa-sim/riscv/insns/vrgather_vi.h | 30 +
 .../riscv-isa-sim/riscv/insns/vrgather_vv.h | 32 +
 .../riscv-isa-sim/riscv/insns/vrgather_vx.h | 24 +
 .../riscv/insns/vrgatherei16_vv.h | 34 +
 vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vs1r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vs2r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vs4r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vs8r_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h | 28 +
 vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h | 28 +
 vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h | 28 +
 vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h | 11 +
 vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h | 12 +
 vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vse16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vse32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vse64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vse8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsetivli.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsetvl.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsetvli.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h | 1 +
 vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h | 1 +
 .../riscv/insns/vslide1down_vx.h | 44 +
 .../riscv-isa-sim/riscv/insns/vslide1up_vx.h | 30 +
 .../riscv-isa-sim/riscv/insns/vslidedown_vi.h | 36 +
 .../riscv-isa-sim/riscv/insns/vslidedown_vx.h | 36 +
 .../riscv-isa-sim/riscv/insns/vslideup_vi.h | 31 +
 .../riscv-isa-sim/riscv/insns/vslideup_vx.h | 31 +
 vendor/riscv-isa-sim/riscv/insns/vsll_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsll_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsll_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsm_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h | 32 +
 vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h | 33 +
 vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsra_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsra_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsra_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsse16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsse32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsse64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsse8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vssra_vi.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vssra_vv.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vssra_vx.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h | 10 +
 vendor/riscv-isa-sim/riscv/insns/vssub_vv.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/vssub_vx.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h | 30 +
 vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h | 29 +
 vendor/riscv-isa-sim/riscv/insns/vsub_vv.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsub_vx.h | 5 +
 vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h | 2 +
 vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h | 6 +
 vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h | 6 +
.../riscv-isa-sim/riscv/insns/vwmaccsu_vv.h | 6 +
.../riscv-isa-sim/riscv/insns/vwmaccsu_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h | 6 +
.../riscv-isa-sim/riscv/insns/vwmaccus_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h | 6 +
.../riscv-isa-sim/riscv/insns/vwredsum_vs.h | 5 +
.../riscv-isa-sim/riscv/insns/vwredsumu_vs.h | 5 +
vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h | 6 +
vendor/riscv-isa-sim/riscv/insns/vxor_vi.h | 5 +
vendor/riscv-isa-sim/riscv/insns/vxor_vv.h | 5 +
vendor/riscv-isa-sim/riscv/insns/vxor_vx.h | 5 +
vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h | 1 +
vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h | 1 +
vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h | 1 +
vendor/riscv-isa-sim/riscv/insns/wfi.h | 11 +
vendor/riscv-isa-sim/riscv/insns/xnor.h | 2 +
vendor/riscv-isa-sim/riscv/insns/xor.h | 1 +
vendor/riscv-isa-sim/riscv/insns/xori.h | 1 +
vendor/riscv-isa-sim/riscv/insns/xperm16.h | 2 +
vendor/riscv-isa-sim/riscv/insns/xperm32.h | 3 +
vendor/riscv-isa-sim/riscv/insns/xperm4.h | 2 +
vendor/riscv-isa-sim/riscv/insns/xperm8.h | 2 +
vendor/riscv-isa-sim/riscv/insns/zunpkd810.h | 1 +
vendor/riscv-isa-sim/riscv/insns/zunpkd820.h | 1 +
vendor/riscv-isa-sim/riscv/insns/zunpkd830.h | 1 +
vendor/riscv-isa-sim/riscv/insns/zunpkd831.h | 1 +
vendor/riscv-isa-sim/riscv/insns/zunpkd832.h | 1 +
vendor/riscv-isa-sim/riscv/interactive.cc | 579 ++
vendor/riscv-isa-sim/riscv/isa_parser.cc | 247 +
vendor/riscv-isa-sim/riscv/isa_parser.h | 90 +
vendor/riscv-isa-sim/riscv/jtag_dtm.cc | 204 +
vendor/riscv-isa-sim/riscv/jtag_dtm.h | 69 +
vendor/riscv-isa-sim/riscv/log_file.h | 37 +
vendor/riscv-isa-sim/riscv/memtracer.h | 56 +
vendor/riscv-isa-sim/riscv/mmio_plugin.h | 91 +
vendor/riscv-isa-sim/riscv/mmu.cc | 447 +
vendor/riscv-isa-sim/riscv/mmu.h | 559 ++
vendor/riscv-isa-sim/riscv/opcodes.h | 249 +
vendor/riscv-isa-sim/riscv/overlap_list.h | 8 +
vendor/riscv-isa-sim/riscv/platform.h | 11 +
vendor/riscv-isa-sim/riscv/processor.cc | 1028 +++
vendor/riscv-isa-sim/riscv/processor.h | 469 +
vendor/riscv-isa-sim/riscv/remote_bitbang.cc | 187 +
vendor/riscv-isa-sim/riscv/remote_bitbang.h | 34 +
vendor/riscv-isa-sim/riscv/riscv.ac | 65 +
vendor/riscv-isa-sim/riscv/riscv.mk.in | 1301 +++
vendor/riscv-isa-sim/riscv/rocc.cc | 46 +
vendor/riscv-isa-sim/riscv/rocc.h | 61 +
vendor/riscv-isa-sim/riscv/rom.cc | 19 +
vendor/riscv-isa-sim/riscv/sim.cc | 438 +
vendor/riscv-isa-sim/riscv/sim.h | 175 +
vendor/riscv-isa-sim/riscv/simif.h | 24 +
vendor/riscv-isa-sim/riscv/tracer.h | 11 +
vendor/riscv-isa-sim/riscv/trap.h | 116 +
vendor/riscv-isa-sim/riscv/triggers.cc | 206 +
vendor/riscv-isa-sim/riscv/triggers.h | 138 +
vendor/riscv-isa-sim/scripts/config.guess | 1698 ++++
vendor/riscv-isa-sim/scripts/config.sub | 1854 ++++
vendor/riscv-isa-sim/scripts/install.sh | 238 +
.../riscv-isa-sim/scripts/mk-install-dirs.sh | 40 +
vendor/riscv-isa-sim/scripts/vcs-version.sh | 117 +
vendor/riscv-isa-sim/softfloat/f128_add.c | 78 +
.../riscv-isa-sim/softfloat/f128_classify.c | 37 +
vendor/riscv-isa-sim/softfloat/f128_div.c | 199 +
vendor/riscv-isa-sim/softfloat/f128_eq.c | 73 +
.../softfloat/f128_eq_signaling.c | 67 +
.../softfloat/f128_isSignalingNaN.c | 51 +
vendor/riscv-isa-sim/softfloat/f128_le.c | 72 +
.../riscv-isa-sim/softfloat/f128_le_quiet.c | 78 +
vendor/riscv-isa-sim/softfloat/f128_lt.c | 72 +
.../riscv-isa-sim/softfloat/f128_lt_quiet.c | 78 +
vendor/riscv-isa-sim/softfloat/f128_mul.c | 163 +
vendor/riscv-isa-sim/softfloat/f128_mulAdd.c | 63 +
vendor/riscv-isa-sim/softfloat/f128_rem.c | 190 +
.../riscv-isa-sim/softfloat/f128_roundToInt.c | 160 +
vendor/riscv-isa-sim/softfloat/f128_sqrt.c | 201 +
vendor/riscv-isa-sim/softfloat/f128_sub.c | 78 +
vendor/riscv-isa-sim/softfloat/f128_to_f16.c | 95 +
vendor/riscv-isa-sim/softfloat/f128_to_f32.c | 95 +
vendor/riscv-isa-sim/softfloat/f128_to_f64.c | 100 +
vendor/riscv-isa-sim/softfloat/f128_to_i32.c | 85 +
.../softfloat/f128_to_i32_r_minMag.c | 100 +
vendor/riscv-isa-sim/softfloat/f128_to_i64.c | 95 +
.../softfloat/f128_to_i64_r_minMag.c | 113 +
vendor/riscv-isa-sim/softfloat/f128_to_ui32.c | 86 +
.../softfloat/f128_to_ui32_r_minMag.c | 89 +
vendor/riscv-isa-sim/softfloat/f128_to_ui64.c | 96 +
.../softfloat/f128_to_ui64_r_minMag.c | 105 +
vendor/riscv-isa-sim/softfloat/f16_add.c | 70 +
vendor/riscv-isa-sim/softfloat/f16_classify.c | 36 +
vendor/riscv-isa-sim/softfloat/f16_div.c | 186 +
vendor/riscv-isa-sim/softfloat/f16_eq.c | 66 +
.../softfloat/f16_eq_signaling.c | 61 +
.../softfloat/f16_isSignalingNaN.c | 51 +
vendor/riscv-isa-sim/softfloat/f16_le.c | 66 +
vendor/riscv-isa-sim/softfloat/f16_le_quiet.c | 71 +
vendor/riscv-isa-sim/softfloat/f16_lt.c | 66 +
vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c | 71 +
vendor/riscv-isa-sim/softfloat/f16_mul.c | 140 +
vendor/riscv-isa-sim/softfloat/f16_mulAdd.c | 60 +
vendor/riscv-isa-sim/softfloat/f16_rem.c | 171 +
.../riscv-isa-sim/softfloat/f16_roundToInt.c | 112 +
vendor/riscv-isa-sim/softfloat/f16_sqrt.c | 136 +
vendor/riscv-isa-sim/softfloat/f16_sub.c | 70 +
vendor/riscv-isa-sim/softfloat/f16_to_f128.c | 96 +
vendor/riscv-isa-sim/softfloat/f16_to_f32.c | 93 +
vendor/riscv-isa-sim/softfloat/f16_to_f64.c | 93 +
vendor/riscv-isa-sim/softfloat/f16_to_i16.c | 57 +
vendor/riscv-isa-sim/softfloat/f16_to_i32.c | 87 +
.../softfloat/f16_to_i32_r_minMag.c | 88 +
vendor/riscv-isa-sim/softfloat/f16_to_i64.c | 87 +
.../softfloat/f16_to_i64_r_minMag.c | 88 +
vendor/riscv-isa-sim/softfloat/f16_to_i8.c | 57 +
vendor/riscv-isa-sim/softfloat/f16_to_ui16.c | 54 +
vendor/riscv-isa-sim/softfloat/f16_to_ui32.c | 84 +
.../softfloat/f16_to_ui32_r_minMag.c | 87 +
vendor/riscv-isa-sim/softfloat/f16_to_ui64.c | 84 +
.../softfloat/f16_to_ui64_r_minMag.c | 87 +
vendor/riscv-isa-sim/softfloat/f16_to_ui8.c | 54 +
vendor/riscv-isa-sim/softfloat/f32_add.c | 70 +
vendor/riscv-isa-sim/softfloat/f32_classify.c | 36 +
vendor/riscv-isa-sim/softfloat/f32_div.c | 180 +
vendor/riscv-isa-sim/softfloat/f32_eq.c | 66 +
.../softfloat/f32_eq_signaling.c | 61 +
.../softfloat/f32_isSignalingNaN.c | 51 +
vendor/riscv-isa-sim/softfloat/f32_le.c | 66 +
vendor/riscv-isa-sim/softfloat/f32_le_quiet.c | 71 +
vendor/riscv-isa-sim/softfloat/f32_lt.c | 66 +
vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c | 71 +
vendor/riscv-isa-sim/softfloat/f32_mul.c | 137 +
vendor/riscv-isa-sim/softfloat/f32_mulAdd.c | 60 +
vendor/riscv-isa-sim/softfloat/f32_rem.c | 168 +
.../riscv-isa-sim/softfloat/f32_roundToInt.c | 112 +
vendor/riscv-isa-sim/softfloat/f32_sqrt.c | 121 +
vendor/riscv-isa-sim/softfloat/f32_sub.c | 70 +
vendor/riscv-isa-sim/softfloat/f32_to_f128.c | 96 +
vendor/riscv-isa-sim/softfloat/f32_to_f16.c | 88 +
vendor/riscv-isa-sim/softfloat/f32_to_f64.c | 93 +
vendor/riscv-isa-sim/softfloat/f32_to_i16.c | 57 +
vendor/riscv-isa-sim/softfloat/f32_to_i32.c | 84 +
.../softfloat/f32_to_i32_r_minMag.c | 89 +
vendor/riscv-isa-sim/softfloat/f32_to_i64.c | 96 +
.../softfloat/f32_to_i64_r_minMag.c | 94 +
vendor/riscv-isa-sim/softfloat/f32_to_ui16.c | 53 +
vendor/riscv-isa-sim/softfloat/f32_to_ui32.c | 84 +
.../softfloat/f32_to_ui32_r_minMag.c | 88 +
vendor/riscv-isa-sim/softfloat/f32_to_ui64.c | 96 +
.../softfloat/f32_to_ui64_r_minMag.c | 90 +
vendor/riscv-isa-sim/softfloat/f64_add.c | 74 +
vendor/riscv-isa-sim/softfloat/f64_classify.c | 36 +
vendor/riscv-isa-sim/softfloat/f64_div.c | 172 +
vendor/riscv-isa-sim/softfloat/f64_eq.c | 66 +
.../softfloat/f64_eq_signaling.c | 61 +
.../softfloat/f64_isSignalingNaN.c | 51 +
vendor/riscv-isa-sim/softfloat/f64_le.c | 67 +
vendor/riscv-isa-sim/softfloat/f64_le_quiet.c | 72 +
vendor/riscv-isa-sim/softfloat/f64_lt.c | 67 +
vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c | 72 +
vendor/riscv-isa-sim/softfloat/f64_mul.c | 150 +
vendor/riscv-isa-sim/softfloat/f64_mulAdd.c | 60 +
vendor/riscv-isa-sim/softfloat/f64_rem.c | 189 +
.../riscv-isa-sim/softfloat/f64_roundToInt.c | 112 +
vendor/riscv-isa-sim/softfloat/f64_sqrt.c | 133 +
vendor/riscv-isa-sim/softfloat/f64_sub.c | 74 +
vendor/riscv-isa-sim/softfloat/f64_to_f128.c | 98 +
vendor/riscv-isa-sim/softfloat/f64_to_f16.c | 88 +
vendor/riscv-isa-sim/softfloat/f64_to_f32.c | 88 +
vendor/riscv-isa-sim/softfloat/f64_to_i32.c | 82 +
.../softfloat/f64_to_i32_r_minMag.c | 96 +
vendor/riscv-isa-sim/softfloat/f64_to_i64.c | 103 +
.../softfloat/f64_to_i64_r_minMag.c | 100 +
vendor/riscv-isa-sim/softfloat/f64_to_ui32.c | 82 +
.../softfloat/f64_to_ui32_r_minMag.c | 88 +
vendor/riscv-isa-sim/softfloat/f64_to_ui64.c | 103 +
.../softfloat/f64_to_ui64_r_minMag.c | 93 +
vendor/riscv-isa-sim/softfloat/fall_maxmin.c | 81 +
.../riscv-isa-sim/softfloat/fall_reciprocal.c | 392 +
vendor/riscv-isa-sim/softfloat/i32_to_f128.c | 64 +
vendor/riscv-isa-sim/softfloat/i32_to_f16.c | 71 +
vendor/riscv-isa-sim/softfloat/i32_to_f32.c | 58 +
vendor/riscv-isa-sim/softfloat/i32_to_f64.c | 65 +
vendor/riscv-isa-sim/softfloat/i64_to_f128.c | 72 +
vendor/riscv-isa-sim/softfloat/i64_to_f16.c | 70 +
vendor/riscv-isa-sim/softfloat/i64_to_f32.c | 70 +
vendor/riscv-isa-sim/softfloat/i64_to_f64.c | 58 +
vendor/riscv-isa-sim/softfloat/internals.h | 286 +
vendor/riscv-isa-sim/softfloat/platform.h | 52 +
.../riscv-isa-sim/softfloat/primitiveTypes.h | 86 +
vendor/riscv-isa-sim/softfloat/primitives.h | 1168 +++
vendor/riscv-isa-sim/softfloat/s_add128.c | 55 +
vendor/riscv-isa-sim/softfloat/s_add256M.c | 65 +
vendor/riscv-isa-sim/softfloat/s_addCarryM.c | 70 +
.../softfloat/s_addComplCarryM.c | 70 +
vendor/riscv-isa-sim/softfloat/s_addM.c | 70 +
.../riscv-isa-sim/softfloat/s_addMagsF128.c | 154 +
vendor/riscv-isa-sim/softfloat/s_addMagsF16.c | 183 +
vendor/riscv-isa-sim/softfloat/s_addMagsF32.c | 126 +
vendor/riscv-isa-sim/softfloat/s_addMagsF64.c | 128 +
.../softfloat/s_approxRecip32_1.c | 66 +
.../softfloat/s_approxRecipSqrt32_1.c | 73 +
.../softfloat/s_approxRecipSqrt_1Ks.c | 49 +
.../softfloat/s_approxRecip_1Ks.c | 49 +
.../softfloat/s_commonNaNToF128UI.c | 56 +
.../softfloat/s_commonNaNToF16UI.c | 5 +
.../softfloat/s_commonNaNToF32UI.c | 5 +
.../softfloat/s_commonNaNToF64UI.c | 5 +
.../riscv-isa-sim/softfloat/s_compare128M.c | 62 +
vendor/riscv-isa-sim/softfloat/s_compare96M.c | 62 +
.../softfloat/s_countLeadingZeros16.c | 60 +
.../softfloat/s_countLeadingZeros32.c | 64 +
.../softfloat/s_countLeadingZeros64.c | 73 +
.../softfloat/s_countLeadingZeros8.c | 59 +
vendor/riscv-isa-sim/softfloat/s_eq128.c | 51 +
.../softfloat/s_f128UIToCommonNaN.c | 5 +
.../softfloat/s_f16UIToCommonNaN.c | 5 +
.../softfloat/s_f32UIToCommonNaN.c | 5 +
.../softfloat/s_f64UIToCommonNaN.c | 5 +
vendor/riscv-isa-sim/softfloat/s_le128.c | 51 +
vendor/riscv-isa-sim/softfloat/s_lt128.c | 51 +
vendor/riscv-isa-sim/softfloat/s_mul128By32.c | 58 +
.../riscv-isa-sim/softfloat/s_mul128MTo256M.c | 100 +
.../riscv-isa-sim/softfloat/s_mul128To256M.c | 71 +
.../softfloat/s_mul64ByShifted32To128.c | 56 +
vendor/riscv-isa-sim/softfloat/s_mul64To128.c | 66 +
.../riscv-isa-sim/softfloat/s_mul64To128M.c | 68 +
vendor/riscv-isa-sim/softfloat/s_mulAddF128.c | 350 +
vendor/riscv-isa-sim/softfloat/s_mulAddF16.c | 226 +
vendor/riscv-isa-sim/softfloat/s_mulAddF32.c | 224 +
vendor/riscv-isa-sim/softfloat/s_mulAddF64.c | 496 ++
vendor/riscv-isa-sim/softfloat/s_negXM.c | 63 +
.../softfloat/s_normRoundPackToF128.c | 81 +
.../softfloat/s_normRoundPackToF16.c | 58 +
.../softfloat/s_normRoundPackToF32.c | 58 +
.../softfloat/s_normRoundPackToF64.c | 58 +
.../softfloat/s_normSubnormalF128Sig.c | 65 +
.../softfloat/s_normSubnormalF16Sig.c | 52 +
.../softfloat/s_normSubnormalF32Sig.c | 52 +
.../softfloat/s_normSubnormalF64Sig.c | 52 +
.../softfloat/s_propagateNaNF128UI.c | 73 +
.../softfloat/s_propagateNaNF16UI.c | 58 +
.../softfloat/s_propagateNaNF32UI.c | 58 +
.../softfloat/s_propagateNaNF64UI.c | 58 +
.../riscv-isa-sim/softfloat/s_remStepMBy32.c | 86 +
.../riscv-isa-sim/softfloat/s_roundMToI64.c | 88 +
.../riscv-isa-sim/softfloat/s_roundMToUI64.c | 84 +
.../softfloat/s_roundPackMToI64.c | 88 +
.../softfloat/s_roundPackMToUI64.c | 84 +
.../softfloat/s_roundPackToF128.c | 171 +
.../softfloat/s_roundPackToF16.c | 113 +
.../softfloat/s_roundPackToF32.c | 113 +
.../softfloat/s_roundPackToF64.c | 117 +
.../softfloat/s_roundPackToI32.c | 84 +
.../softfloat/s_roundPackToI64.c | 89 +
.../softfloat/s_roundPackToUI32.c | 80 +
.../softfloat/s_roundPackToUI64.c | 85 +
vendor/riscv-isa-sim/softfloat/s_roundToI32.c | 84 +
vendor/riscv-isa-sim/softfloat/s_roundToI64.c | 89 +
.../riscv-isa-sim/softfloat/s_roundToUI32.c | 80 +
.../riscv-isa-sim/softfloat/s_roundToUI64.c | 85 +
.../softfloat/s_shiftRightJam128.c | 69 +
.../softfloat/s_shiftRightJam128Extra.c | 77 +
.../softfloat/s_shiftRightJam256M.c | 126 +
.../softfloat/s_shiftRightJam32.c | 51 +
.../softfloat/s_shiftRightJam64.c | 51 +
.../softfloat/s_shiftRightJam64Extra.c | 62 +
.../softfloat/s_shortShiftLeft128.c | 55 +
.../softfloat/s_shortShiftLeft64To96M.c | 56 +
.../softfloat/s_shortShiftRight128.c | 55 +
.../softfloat/s_shortShiftRightExtendM.c | 73 +
.../softfloat/s_shortShiftRightJam128.c | 60 +
.../softfloat/s_shortShiftRightJam128Extra.c | 59 +
.../softfloat/s_shortShiftRightJam64.c | 50 +
.../softfloat/s_shortShiftRightJam64Extra.c | 56 +
.../softfloat/s_shortShiftRightM.c | 70 +
vendor/riscv-isa-sim/softfloat/s_sub128.c | 55 +
vendor/riscv-isa-sim/softfloat/s_sub1XM.c | 60 +
vendor/riscv-isa-sim/softfloat/s_sub256M.c | 65 +
vendor/riscv-isa-sim/softfloat/s_subM.c | 70 +
.../riscv-isa-sim/softfloat/s_subMagsF128.c | 139 +
vendor/riscv-isa-sim/softfloat/s_subMagsF16.c | 187 +
vendor/riscv-isa-sim/softfloat/s_subMagsF32.c | 143 +
vendor/riscv-isa-sim/softfloat/s_subMagsF64.c | 141 +
vendor/riscv-isa-sim/softfloat/softfloat.ac | 0
vendor/riscv-isa-sim/softfloat/softfloat.h | 402 +
.../riscv-isa-sim/softfloat/softfloat.mk.in | 241 +
.../softfloat/softfloat_raiseFlags.c | 52 +
.../riscv-isa-sim/softfloat/softfloat_state.c | 52 +
.../riscv-isa-sim/softfloat/softfloat_types.h | 81 +
vendor/riscv-isa-sim/softfloat/specialize.h | 429 +
vendor/riscv-isa-sim/softfloat/ui32_to_f128.c | 60 +
vendor/riscv-isa-sim/softfloat/ui32_to_f16.c | 65 +
vendor/riscv-isa-sim/softfloat/ui32_to_f32.c | 57 +
vendor/riscv-isa-sim/softfloat/ui32_to_f64.c | 59 +
vendor/riscv-isa-sim/softfloat/ui64_to_f128.c | 68 +
vendor/riscv-isa-sim/softfloat/ui64_to_f16.c | 64 +
vendor/riscv-isa-sim/softfloat/ui64_to_f32.c | 64 +
vendor/riscv-isa-sim/softfloat/ui64_to_f64.c | 59 +
vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc | 70 +
vendor/riscv-isa-sim/spike_dasm/spike_dasm.ac | 0
.../riscv-isa-sim/spike_dasm/spike_dasm.mk.in | 10 +
.../spike_dasm/spike_dasm_option_parser.cc | 51 +
.../spike_main/spike-log-parser.cc | 61 +
vendor/riscv-isa-sim/spike_main/spike.cc | 530 ++
vendor/riscv-isa-sim/spike_main/spike_main.ac | 0
.../riscv-isa-sim/spike_main/spike_main.mk.in | 16 +
.../spike_main/termios-xspike.cc | 29 +
vendor/riscv-isa-sim/spike_main/xspike.cc | 102 +
vendor/riscv-isa-sim/tests/ebreak.py | 26 +
vendor/riscv-isa-sim/tests/ebreak.s | 5 +
vendor/riscv-isa-sim/tests/mseccfg/Makefile | 70 +
vendor/riscv-isa-sim/tests/mseccfg/crt.S | 230 +
vendor/riscv-isa-sim/tests/mseccfg/encoding.h | 1473 ++++
.../tests/mseccfg/gengen_src/Makefile | 17 +
.../tests/mseccfg/gengen_src/gen_pmp_test.cc | 379 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c | 314 +
..._pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c | 314 +
...pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c | 345 +
...pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c | 345 +
...pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c | 296 +
...pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c | 296 +
...pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c | 296 +
...pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c | 296 +
...pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c | 296 +
...pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c | 296 +
...pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c | 296 +
...pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c | 296 +
...pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c | 296 +
...pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c | 296 +
...pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c | 296 +
.../mseccfg/gengen_src/test_pmp_csr_1.cc_skel | 313 +
.../tests/mseccfg/gengen_src/test_pmp_csr_1.h | 1170 +++
.../mseccfg/gengen_src/test_pmp_ok_1.cc_skel | 344 +
.../tests/mseccfg/gengen_src/test_pmp_ok_1.h | 1177 +++
.../gengen_src/test_pmp_ok_share_1.cc_skel | 295 +
.../mseccfg/gengen_src/test_pmp_ok_share_1.h | 997 +++
.../tests/mseccfg/gengen_tool/Makefile.inc | 30 +
.../tests/mseccfg/gengen_tool/gengen | Bin 0 -> 1901792 bytes
.../tests/mseccfg/mseccfg_test.ld | 79 +
.../pmp_enhancement_sail_spike_unit_test.doc | 3409 ++++
vendor/riscv-isa-sim/tests/mseccfg/syscalls.c | 485 ++
vendor/riscv-isa-sim/tests/mseccfg/util.h | 90 +
vendor/riscv-isa-sim/tests/testlib.py | 116 +
vendor/riscv_isa_sim.lock.hjson | 4 +-
vendor/riscv_isa_sim.vendor.hjson | 8 +-
2326 files changed, 331161 insertions(+), 6 deletions(-)
create mode 100644 vendor/riscv-isa-sim/.github/workflows/apt-packages.txt
create mode 100644 vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml
create mode 100644 vendor/riscv-isa-sim/.gitignore
create mode 100644 vendor/riscv-isa-sim/ChangeLog.md
create mode 100644 vendor/riscv-isa-sim/LICENSE
create mode 100644 vendor/riscv-isa-sim/Makefile.in
create mode 100644 vendor/riscv-isa-sim/README.md
create mode 100644 vendor/riscv-isa-sim/VERSION
create mode 100644 vendor/riscv-isa-sim/aclocal.m4
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/README.md
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/link.ld
create mode 100644 vendor/riscv-isa-sim/arch_test_target/spike/model_test.h
create mode 100644 vendor/riscv-isa-sim/ax_append_flag.m4
create mode 100644 vendor/riscv-isa-sim/ax_append_link_flags.m4
create mode 100644 vendor/riscv-isa-sim/ax_boost_asio.m4
create mode 100644 vendor/riscv-isa-sim/ax_boost_base.m4
create mode 100644 vendor/riscv-isa-sim/ax_boost_regex.m4
create mode 100644 vendor/riscv-isa-sim/ax_check_compile_flag.m4
create mode 100644 vendor/riscv-isa-sim/ax_check_link_flag.m4
create mode 100644 vendor/riscv-isa-sim/ax_require_defined.m4
create mode 100755 vendor/riscv-isa-sim/ci-tests/test-spike
create mode 100644 vendor/riscv-isa-sim/config.h.in
create mode 100755 vendor/riscv-isa-sim/configure
create mode 100644 vendor/riscv-isa-sim/configure.ac
create mode 100644 vendor/riscv-isa-sim/customext/cflush.cc
create mode 100644 vendor/riscv-isa-sim/customext/customext.ac
create mode 100644 vendor/riscv-isa-sim/customext/customext.mk.in
create mode 100644 vendor/riscv-isa-sim/customext/dummy_rocc.cc
create mode 100644 vendor/riscv-isa-sim/customext/dummy_rocc_test.c
create mode 100644 vendor/riscv-isa-sim/debug_rom/.gitignore
create mode 100644 vendor/riscv-isa-sim/debug_rom/Makefile
create mode 100755 vendor/riscv-isa-sim/debug_rom/debug_rom.S
create mode 100644 vendor/riscv-isa-sim/debug_rom/debug_rom.h
create mode 100644 vendor/riscv-isa-sim/debug_rom/link.ld
create mode 100644 vendor/riscv-isa-sim/disasm/disasm.ac
create mode 100644 vendor/riscv-isa-sim/disasm/disasm.cc
create mode 100644 vendor/riscv-isa-sim/disasm/disasm.mk.in
create mode 100644 vendor/riscv-isa-sim/disasm/regnames.cc
create mode 100644 vendor/riscv-isa-sim/fdt/fdt.ac
create mode 100644 vendor/riscv-isa-sim/fdt/fdt.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt.h
create mode 100644 vendor/riscv-isa-sim/fdt/fdt.mk.in
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_addresses.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_empty_tree.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_overlay.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_ro.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_rw.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_strerror.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_sw.c
create mode 100644 vendor/riscv-isa-sim/fdt/fdt_wip.c
create mode 100644 vendor/riscv-isa-sim/fdt/libfdt.h
create mode 100644 vendor/riscv-isa-sim/fdt/libfdt_env.h
create mode 100644 vendor/riscv-isa-sim/fdt/libfdt_internal.h
create mode 100644 vendor/riscv-isa-sim/fesvr/byteorder.h
create mode 100644 vendor/riscv-isa-sim/fesvr/context.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/context.h
create mode 100644 vendor/riscv-isa-sim/fesvr/debug_defines.h
create mode 100644 vendor/riscv-isa-sim/fesvr/device.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/device.h
create mode 100644 vendor/riscv-isa-sim/fesvr/dtm.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/dtm.h
create mode 100644 vendor/riscv-isa-sim/fesvr/dummy.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/elf.h
create mode 100644 vendor/riscv-isa-sim/fesvr/elf2hex.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/elfloader.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/elfloader.h
create mode 100644 vendor/riscv-isa-sim/fesvr/fesvr.ac
create mode 100644 vendor/riscv-isa-sim/fesvr/fesvr.mk.in
create mode 100644 vendor/riscv-isa-sim/fesvr/fesvr.pc.in
create mode 100644 vendor/riscv-isa-sim/fesvr/htif.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/htif.h
create mode 100644 vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/htif_hexwriter.h
create mode 100644 vendor/riscv-isa-sim/fesvr/htif_pthread.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/htif_pthread.h
create mode 100644 vendor/riscv-isa-sim/fesvr/memif.cc
create mode 100644 vendor/riscv-isa-sim/fesvr/memif.h
create mode 100644 vendor/riscv-isa-sim/fesvr/option_parser.cc create mode 100644 vendor/riscv-isa-sim/fesvr/option_parser.h create mode 100644 vendor/riscv-isa-sim/fesvr/rfb.cc create mode 100644 vendor/riscv-isa-sim/fesvr/rfb.h create mode 100644 vendor/riscv-isa-sim/fesvr/syscall.cc create mode 100644 vendor/riscv-isa-sim/fesvr/syscall.h create mode 100644 vendor/riscv-isa-sim/fesvr/term.cc create mode 100644 vendor/riscv-isa-sim/fesvr/term.h create mode 100644 vendor/riscv-isa-sim/fesvr/tsi.cc create mode 100644 vendor/riscv-isa-sim/fesvr/tsi.h create mode 100644 vendor/riscv-isa-sim/riscv-disasm.pc.in create mode 100644 vendor/riscv-isa-sim/riscv-fesvr.pc.in create mode 100644 vendor/riscv-isa-sim/riscv/abstract_device.h create mode 100644 vendor/riscv-isa-sim/riscv/arith.h create mode 100644 vendor/riscv-isa-sim/riscv/cachesim.cc create mode 100644 vendor/riscv-isa-sim/riscv/cachesim.h create mode 100644 vendor/riscv-isa-sim/riscv/cfg.h create mode 100644 vendor/riscv-isa-sim/riscv/clint.cc create mode 100644 vendor/riscv-isa-sim/riscv/common.h create mode 100644 vendor/riscv-isa-sim/riscv/csrs.cc create mode 100644 vendor/riscv-isa-sim/riscv/csrs.h create mode 100644 vendor/riscv-isa-sim/riscv/debug_defines.h create mode 100644 vendor/riscv-isa-sim/riscv/debug_module.cc create mode 100644 vendor/riscv-isa-sim/riscv/debug_module.h create mode 100644 vendor/riscv-isa-sim/riscv/debug_rom_defines.h create mode 100644 vendor/riscv-isa-sim/riscv/decode.h create mode 100644 vendor/riscv-isa-sim/riscv/devices.cc create mode 100644 vendor/riscv-isa-sim/riscv/devices.h create mode 100644 vendor/riscv-isa-sim/riscv/disasm.h create mode 100644 vendor/riscv-isa-sim/riscv/dts.cc create mode 100644 vendor/riscv-isa-sim/riscv/dts.h create mode 100644 vendor/riscv-isa-sim/riscv/encoding.h create mode 100644 vendor/riscv-isa-sim/riscv/entropy_source.h create mode 100644 vendor/riscv-isa-sim/riscv/execute.cc create mode 100644 vendor/riscv-isa-sim/riscv/extension.cc create mode 100644 vendor/riscv-isa-sim/riscv/extension.h create mode 100644 vendor/riscv-isa-sim/riscv/extensions.cc create mode 100644 vendor/riscv-isa-sim/riscv/insn_macros.h create mode 100644 vendor/riscv-isa-sim/riscv/insn_template.cc create mode 100644 vendor/riscv-isa-sim/riscv/insn_template.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/add.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/add16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/add32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/add64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/add8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/add_uw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/addi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/addiw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/addw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes32dsi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes32esi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes32esmi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64ds.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64dsm.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64es.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64esm.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64im.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/aes64ks2.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/aes_common.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoadd_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoadd_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoand_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoand_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amomax_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amomax_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amomin_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amomin_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amominu_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amominu_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoor_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoor_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoswap_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoswap_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoxor_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/amoxor_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/and.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/andi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/andn.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/auipc.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ave.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bclr.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bclri.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bcompress.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bcompressw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bdecompress.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bdecompressw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/beq.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bext.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bexti.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bfp.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bfpw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bge.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bgeu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/binv.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/binvi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/blt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bltu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bmatflip.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bmator.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bmatxor.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bne.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bset.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/bseti.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_add.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_addi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_addw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_and.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_andi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_beqz.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_bnez.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_ebreak.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_fld.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_fldsp.h create 
mode 100644 vendor/riscv-isa-sim/riscv/insns/c_flw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_flwsp.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_fsd.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_fsw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_fswsp.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_j.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_jal.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_jalr.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_jr.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_li.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_lui.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_lw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_lwsp.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_mv.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_or.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_slli.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_srai.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_srli.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_sub.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_subw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_sw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_swsp.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/c_xor.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cbo_clean.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cbo_flush.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cbo_inval.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cbo_zero.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clmul.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clmulh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clmulhw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clmulr.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clmulrw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clmulw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clo16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clo32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clo8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clrs16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clrs32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clrs8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clz.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clz16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clz32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clz8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/clzw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cmix.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cmov.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cmpeq16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cmpeq8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cpop.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cpopw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cras16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/cras32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32_b.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32c_b.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/crc32c_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32c_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crc32c_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/crsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/csrrc.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/csrrci.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/csrrs.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/csrrsi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/csrrw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/csrrwi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ctz.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ctzw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/div.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/divu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/divuw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/divw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/dret.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ebreak.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ecall.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fadd_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fadd_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fadd_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fadd_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fclass_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fclass_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fclass_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fclass_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fdiv_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fdiv_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fdiv_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fdiv_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fence.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fence_i.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/feq_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/feq_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/feq_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/feq_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fld.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fle_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fle_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fle_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fle_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flq.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flt_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flt_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flt_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flt_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/flw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmadd_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmadd_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmadd_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmadd_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmax_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmax_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmax_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmax_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmin_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmin_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmin_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmin_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmsub_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmsub_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmsub_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmsub_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmul_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmul_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmul_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmul_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h create mode 
100644 vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsd.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsl.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fslw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsq.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsr.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsri.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsriw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsrw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsub_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsub_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsub_q.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsub_s.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/fsw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/gorc.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/gorci.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/gorciw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/gorcw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/grev.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/grevi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/greviw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/grevw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlv_b.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlv_bu.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/hlv_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlv_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlv_hu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlv_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlv_wu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hsv_b.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hsv_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hsv_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/hsv_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/insb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/jal.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/jalr.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kabs16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kabs32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kabs8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kabsw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kadd16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kadd32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kadd64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kadd8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kaddh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kaddw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kcras16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kcras32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kcrsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kcrsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmabb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmabb16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmabt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmabt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmatt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmatt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmbb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmbb16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmbt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmbt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmtt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kdmtt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khm16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khm8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmbb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmbb16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmbt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmbt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmtt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmtt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmx16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/khmx8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmabb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmabb32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmabt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmabt32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmada.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmadrs.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmadrs32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmads.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/kmads32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmar64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmatt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmatt32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmaxda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmaxda32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmaxds.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmaxds32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmda32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmac.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmac_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawb2.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawt2.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmsb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmwb2.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmwt2.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmsda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmsda32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmsr64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmsxda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmsxda32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmxda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kmxda32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksll16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksll32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksll8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslli16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslli32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslli8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslliw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksllw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslra16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslra16_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslra32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslra32_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslra8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslra8_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslraw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kslraw_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kstas16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kstas32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kstsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kstsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksub16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksub32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksub64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksub8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksubh.h 
create mode 100644 vendor/riscv-isa-sim/riscv/insns/ksubw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kwmmul.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lbu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ld.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lhu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lr_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lr_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lui.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/lwu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/maddr32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/max.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/maxu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/min.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/minu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mret.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/msubr32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mul.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mulh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mulhsu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mulhu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mulr64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mulsr64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/mulw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/or.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ori.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/orn.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pack.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/packh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/packu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/packuw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/packw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pbsad.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pbsada.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pkbb16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pkbt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pkbt32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pktb16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pktb32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/pktt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/radd16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/radd32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/radd64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/radd8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/raddw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rcras16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rcras32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rcrsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rcrsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rem.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/remu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/remuw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/remw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rol.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rolw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ror.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/rori.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/roriw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rorw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rstas16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rstas32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rstsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rstsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rsub16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rsub32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rsub64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rsub8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/rsubw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sc_d.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sc_w.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sclip16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sclip32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sclip8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/scmple16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/scmple8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/scmplt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/scmplt8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sd.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sext_b.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sext_h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sfence_vma.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh1add.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh2add.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh3add.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha256sig0.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha256sig1.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha256sum0.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha256sum1.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sig0.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sig1.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sum0.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sum1.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/shfl.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/shfli.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/shflw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sinval_vma.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sll.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sll16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sll32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sll8.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/slli.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slli16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slli32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slli8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slli_uw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slliw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sllw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slo.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sloi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sloiw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slow.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/slti.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sltiu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sltu.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sm3p0.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sm3p1.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sm4_common.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sm4ed.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sm4ks.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smal.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smalbb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smalbt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smalda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smaldrs.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smalds.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smaltt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smalxda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smalxds.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smaqa.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smaqa_su.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smar64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smax16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smax32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smax8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smbb16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smbt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smbt32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smdrs.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smdrs32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smds.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smds32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smin16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smin32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smin8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smmul.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smmul_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smmwb.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smmwb_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smmwt.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smmwt_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smslda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smslxda.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smsr64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smtt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smtt32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smul16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smul8.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/smulx16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smulx8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smxds.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/smxds32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra16_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra32_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra8_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sra_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai16_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai32_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai8_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srai_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sraiw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sraiw_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sraw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sret.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl16_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl32_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srl8_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli16_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli32_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srli8_u.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srliw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srlw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sro.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sroi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sroiw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/srow.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/stas16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/stas32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/stsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/stsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sub.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sub16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sub32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sub64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sub8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/subw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sunpkd810.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sunpkd820.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sunpkd830.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sunpkd831.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/sunpkd832.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/sw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uclip16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uclip32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uclip8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ucmple16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ucmple8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ucmplt16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ucmplt8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukadd16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukadd32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukadd64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukadd8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukaddh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukaddw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukcras16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukcras32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukmar64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukmsr64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukstas16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukstas32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukstsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ukstsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uksub16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uksub32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uksub64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uksub8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uksubh.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uksubw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umaqa.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umar64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umax16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umax32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umax8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umin16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umin32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umin8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umsr64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umul16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umul8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umulx16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/umulx8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/unshfl.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/unshfli.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/unshflw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uradd16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uradd32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uradd64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uradd8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/uraddw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urcras16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urcras32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urcrsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urcrsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urstas16.h create mode 100644 
vendor/riscv-isa-sim/riscv/insns/urstas32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urstsa16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/urstsa32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ursub16.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ursub32.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ursub64.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ursub8.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/ursubw.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vadc_vim.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vadd_vi.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vadd_vv.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vadd_vx.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h create mode 100644 vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h 
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vand_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vand_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vand_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vasub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vasub_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vcpop_m.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfclass_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfirst_m.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vid_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/viota_m.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vlm_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vlse16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vlse32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vlse64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vlse8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmand_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmax_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmax_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmin_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmin_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vminu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vminu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmor_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsif_m.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmsof_m.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmul_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmul_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vor_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vor_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vor_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredand_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredor_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrem_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrem_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vremu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vremu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vs1r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vs2r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vs4r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vs8r_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vse16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vse32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vse64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vse8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsetivli.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsetvl.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsetvli.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsll_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsll_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsll_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsm_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsra_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsra_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsra_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsse16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsse32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsse64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsse8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssra_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssra_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssra_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssub_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsub_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vxor_vi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vxor_vv.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vxor_vx.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/wfi.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xnor.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xor.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xori.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xperm16.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xperm32.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xperm4.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/xperm8.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/zunpkd810.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/zunpkd820.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/zunpkd830.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/zunpkd831.h
create mode 100644 vendor/riscv-isa-sim/riscv/insns/zunpkd832.h
create mode 100644 vendor/riscv-isa-sim/riscv/interactive.cc
create mode 100644 vendor/riscv-isa-sim/riscv/isa_parser.cc
create mode 100644 vendor/riscv-isa-sim/riscv/isa_parser.h
create mode 100644 vendor/riscv-isa-sim/riscv/jtag_dtm.cc
create mode 100644 vendor/riscv-isa-sim/riscv/jtag_dtm.h
create mode 100644 vendor/riscv-isa-sim/riscv/log_file.h
create mode 100644 vendor/riscv-isa-sim/riscv/memtracer.h
create mode 100644 vendor/riscv-isa-sim/riscv/mmio_plugin.h
create mode 100644 vendor/riscv-isa-sim/riscv/mmu.cc
create mode 100644 vendor/riscv-isa-sim/riscv/mmu.h
create mode 100644 vendor/riscv-isa-sim/riscv/opcodes.h
create mode 100644 vendor/riscv-isa-sim/riscv/overlap_list.h
create mode 100644 vendor/riscv-isa-sim/riscv/platform.h
create mode 100644 vendor/riscv-isa-sim/riscv/processor.cc
create mode 100644 vendor/riscv-isa-sim/riscv/processor.h
create mode 100644 vendor/riscv-isa-sim/riscv/remote_bitbang.cc
create mode 100644 vendor/riscv-isa-sim/riscv/remote_bitbang.h
create mode 100644 vendor/riscv-isa-sim/riscv/riscv.ac
create mode 100644 vendor/riscv-isa-sim/riscv/riscv.mk.in
create mode 100644 vendor/riscv-isa-sim/riscv/rocc.cc
create mode 100644 vendor/riscv-isa-sim/riscv/rocc.h
create mode 100644 vendor/riscv-isa-sim/riscv/rom.cc
create mode 100644 vendor/riscv-isa-sim/riscv/sim.cc
create mode 100644 vendor/riscv-isa-sim/riscv/sim.h
create mode 100644 vendor/riscv-isa-sim/riscv/simif.h
create mode 100644 vendor/riscv-isa-sim/riscv/tracer.h
create mode 100644 vendor/riscv-isa-sim/riscv/trap.h
create mode 100644 vendor/riscv-isa-sim/riscv/triggers.cc
create mode 100644 vendor/riscv-isa-sim/riscv/triggers.h
create mode 100644 vendor/riscv-isa-sim/scripts/config.guess
create mode 100644 vendor/riscv-isa-sim/scripts/config.sub
create mode 100755 vendor/riscv-isa-sim/scripts/install.sh
create mode 100755 vendor/riscv-isa-sim/scripts/mk-install-dirs.sh
create mode 100755 vendor/riscv-isa-sim/scripts/vcs-version.sh
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_add.c
create mode 100755 vendor/riscv-isa-sim/softfloat/f128_classify.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_div.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_eq.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_le.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_le_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_lt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_mul.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_mulAdd.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_rem.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_roundToInt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_sqrt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_sub.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_f64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_i32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_i64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_ui32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_ui64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_add.c
create mode 100755 vendor/riscv-isa-sim/softfloat/f16_classify.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_div.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_eq.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_le.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_le_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_lt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_mul.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_mulAdd.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_rem.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_roundToInt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_sqrt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_sub.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_f64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_i16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_i32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_i64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_i8.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_ui16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_ui32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_ui64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f16_to_ui8.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_add.c
create mode 100755 vendor/riscv-isa-sim/softfloat/f32_classify.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_div.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_eq.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_le.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_le_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_lt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_mul.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_mulAdd.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_rem.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_roundToInt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_sqrt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_sub.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_f64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_i16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_i32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_i64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_ui16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_ui32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_ui64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_add.c
create mode 100755 vendor/riscv-isa-sim/softfloat/f64_classify.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_div.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_eq.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_isSignalingNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_le.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_le_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_lt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_mul.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_mulAdd.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_rem.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_roundToInt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_sqrt.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_sub.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_i32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_i64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_ui32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_ui64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c
create mode 100644 vendor/riscv-isa-sim/softfloat/fall_maxmin.c
create mode 100644 vendor/riscv-isa-sim/softfloat/fall_reciprocal.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i32_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i32_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i32_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i32_to_f64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i64_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i64_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i64_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/i64_to_f64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/internals.h
create mode 100644 vendor/riscv-isa-sim/softfloat/platform.h
create mode 100644 vendor/riscv-isa-sim/softfloat/primitiveTypes.h
create mode 100644 vendor/riscv-isa-sim/softfloat/primitives.h
create mode 100644 vendor/riscv-isa-sim/softfloat/s_add128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_add256M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addCarryM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addMagsF128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addMagsF16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addMagsF32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_addMagsF64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_compare128M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_compare96M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_eq128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_le128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_lt128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mul128By32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mul128To256M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mul64To128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mul64To128M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mulAddF128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mulAddF16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mulAddF32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_mulAddF64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_negXM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normRoundPackToF32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normRoundPackToF64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normSubnormalF128Sig.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normSubnormalF16Sig.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normSubnormalF32Sig.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_normSubnormalF64Sig.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_propagateNaNF128UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundMToI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundToI32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundToI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundToUI32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_roundToUI64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftLeft64To96M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64Extra.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_sub128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_sub1XM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_sub256M.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_subM.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_subMagsF128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_subMagsF16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_subMagsF32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/s_subMagsF64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/softfloat.ac
create mode 100644 vendor/riscv-isa-sim/softfloat/softfloat.h
create mode 100644 vendor/riscv-isa-sim/softfloat/softfloat.mk.in
create mode 100644 vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c
create mode 100644 vendor/riscv-isa-sim/softfloat/softfloat_state.c
create mode 100644 vendor/riscv-isa-sim/softfloat/softfloat_types.h
create mode 100644 vendor/riscv-isa-sim/softfloat/specialize.h
create mode 100644 vendor/riscv-isa-sim/softfloat/ui32_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui32_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui32_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui32_to_f64.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui64_to_f128.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui64_to_f16.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui64_to_f32.c
create mode 100644 vendor/riscv-isa-sim/softfloat/ui64_to_f64.c
create mode 100644 vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc
create mode 100644 vendor/riscv-isa-sim/spike_dasm/spike_dasm.ac
create mode 100644 vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in
create mode 100644 vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc
create mode 100644 vendor/riscv-isa-sim/spike_main/spike-log-parser.cc
create mode 100644 vendor/riscv-isa-sim/spike_main/spike.cc
create mode 100644 vendor/riscv-isa-sim/spike_main/spike_main.ac
create mode 100644 vendor/riscv-isa-sim/spike_main/spike_main.mk.in
create mode 100644 vendor/riscv-isa-sim/spike_main/termios-xspike.cc
create mode 100644 vendor/riscv-isa-sim/spike_main/xspike.cc
create mode 100755 vendor/riscv-isa-sim/tests/ebreak.py
create mode 100644 vendor/riscv-isa-sim/tests/ebreak.s
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/Makefile
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/crt.S
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/encoding.h
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c
create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c
create mode 100644
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc create mode 100755 vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/gengen create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld create mode 100644 
vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/syscalls.c create mode 100644 vendor/riscv-isa-sim/tests/mseccfg/util.h create mode 100644 vendor/riscv-isa-sim/tests/testlib.py diff --git a/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt b/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt new file mode 100644 index 00000000..e153391f --- /dev/null +++ b/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt @@ -0,0 +1,2 @@ +build-essential +device-tree-compiler diff --git a/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml b/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml new file mode 100644 index 00000000..aeaf460e --- /dev/null +++ b/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml @@ -0,0 +1,28 @@ +# This file describes the GitHub Actions workflow for continuous integration of Spike. +# +# See +# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions +# for API reference documentation on this file format. + +name: Continuous Integration + +on: + push: + branches: + - master + pull_request: + branches: + - master + + +jobs: + test: + name: Test Spike build + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Install Dependencies + run: sudo xargs apt-get install -y < .github/workflows/apt-packages.txt + + - run: ci-tests/test-spike diff --git a/vendor/riscv-isa-sim/.gitignore b/vendor/riscv-isa-sim/.gitignore new file mode 100644 index 00000000..14326e9c --- /dev/null +++ b/vendor/riscv-isa-sim/.gitignore @@ -0,0 +1,7 @@ +build/ +*.gch +autom4te.cache/ +.*.swp +*.o +*.d +.gdb_history diff --git a/vendor/riscv-isa-sim/ChangeLog.md b/vendor/riscv-isa-sim/ChangeLog.md new file mode 100644 index 00000000..144cb135 --- /dev/null +++ b/vendor/riscv-isa-sim/ChangeLog.md @@ -0,0 +1,36 @@ +Version 1.1.0 +------------- +- Zbkb, Zbkc, Zbkx, Zknd, Zkne, Zknh, Zksed, Zksh scalar cryptography extensions (Zk, Zkn, and Zks groups), v1.0 +- Zkr virtual entropy source emulation, v1.0 +- V extension, v1.0 +- P extension, v0.9.2 +- Zba extension, v1.0 +- Zbb extension, v1.0 +- Zbc extension, v1.0 +- Zbs extension, v1.0 +- Hypervisor extension, v1.0 +- Svnapot extension, v1.0 +- Svpbmt extension, v1.0 +- Svinval extension, v1.0 + +Version 1.0.1-dev +----------------- +- Preliminary support for a subset of the Vector Extension, v0.7.1. +- Support S-mode vectored interrupts (i.e. `stvec[0]` is now writable). +- Added support for dynamic linking of libraries containing MMIO devices. +- Added `--priv` flag to control which privilege modes are available. +- When the commit log is enabled at configure time (`--enable-commitlog`), + it must also be enabled at runtime with the `--log-commits` option. +- Several debug-related additions and changes: + - Added `hasel` debug feature. + - Added `--dm-no-abstract-csr` command-line option. + - Added `--dm-no-halt-groups` command line option. + - Renamed `--progsize` to `--dm-progsize`. + - Renamed `--debug-sba` to `--dm-sba`. + - Renamed `--debug-auth` to `--dm-auth`. + - Renamed `--abstract-rti` to `--dm-abstract-rti`. + - Renamed `--without-hasel` to `--dm-no-hasel`. + +Version 1.0.0 (2019-03-30) +-------------------------- +- First versioned release. 
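A usage sketch of the commit-log options named in the changelog entry above (the flag names come from the entry itself; the build steps and the `pk hello` invocation mirror the README later in this patch and are otherwise illustrative): ``` $ ../configure --prefix=$RISCV --enable-commitlog $ make && [sudo] make install $ spike --log-commits pk hello ```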
diff --git a/vendor/riscv-isa-sim/LICENSE b/vendor/riscv-isa-sim/LICENSE new file mode 100644 index 00000000..34f576ba --- /dev/null +++ b/vendor/riscv-isa-sim/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2010-2017, The Regents of the University of California +(Regents). All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the Regents nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING +OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS +BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED +HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE +MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/vendor/riscv-isa-sim/Makefile.in b/vendor/riscv-isa-sim/Makefile.in new file mode 100644 index 00000000..d4cb83e7 --- /dev/null +++ b/vendor/riscv-isa-sim/Makefile.in @@ -0,0 +1,524 @@ +#========================================================================= +# Toplevel Makefile for the Modular C++ Build System +#========================================================================= +# Please read the documentation in 'mcppbs-doc.txt' for more details on +# how the Modular C++ Build System works. For most projects, a developer +# will not need to make any changes to this makefile. The key targets +# are as follows: +# +# - default : build all libraries and programs +# - check : build and run all unit tests +# - install : install headers, project library, and some programs +# - clean : remove all generated content (except autoconf files) +# - dist : make a source tarball +# - distcheck : make a source tarball, untar it, check it, clean it +# - distclean : remove everything +# + +#------------------------------------------------------------------------- +# Basic setup +#------------------------------------------------------------------------- + +# Remove all default implicit rules since they can cause subtle bugs +# and they just make things run slower +.SUFFIXES: +% : %,v +% : RCS/%,v +% : RCS/% +% : s.% +% : SCCS/s.% + +# Default is to build the prereqs of the all target (defined at bottom) +default : all +.PHONY : default + +project_name := @PACKAGE_TARNAME@ +src_dir := @srcdir@ +scripts_dir := $(src_dir)/scripts + +HAVE_INT128 := @HAVE_INT128@ +HAVE_DLOPEN := @HAVE_DLOPEN@ +HAVE_CLANG_PCH := @HAVE_CLANG_PCH@ + +# If the version information is not in the configure script, then we +# assume that we are in a working directory. We use the vcs-version.sh +# script in the scripts directory to generate an appropriate version +# string.
Currently the way things are set up, we have to run this script +# every time we run make, so the script needs to be as fast as possible. + +ifeq (@PACKAGE_VERSION@,?) + project_ver:=$(shell $(scripts_dir)/vcs-version.sh $(src_dir)) +else + project_ver:=@PACKAGE_VERSION@ +endif + +# Installation directories + +prefix ?= @prefix@ + +INSTALLDIR ?= $(DESTDIR)$(prefix) + +install_hdrs_dir := $(INSTALLDIR)/include +install_libs_dir := $(INSTALLDIR)/lib +install_exes_dir := $(INSTALLDIR)/bin + +#------------------------------------------------------------------------- +# List of subprojects +#------------------------------------------------------------------------- + +sprojs := @subprojects@ +sprojs_enabled := @subprojects_enabled@ + +sprojs_include := -I. -I$(src_dir) $(addprefix -I$(src_dir)/, $(sprojs_enabled)) +VPATH := $(addprefix $(src_dir)/, $(sprojs_enabled)) + +#------------------------------------------------------------------------- +# Programs and flags +#------------------------------------------------------------------------- + +# C++ compiler +# - CPPFLAGS : flags for the preprocessor (eg. -I,-D) +# - CXXFLAGS : flags for C++ compiler (eg. -Wall,-g,-O3) +# +# To allow a user to specify CFLAGS or similar as part of the Make +# command, we also have mcppbs-CFLAGS etc. with stuff that shouldn't be +# lost in such a case. +# +# The order of precedence (highest to lowest) is then: +# +# - Specified as part of Make command line +# - Specified as part of running configure +# - Specified here (default-CFLAGS) +# +# These all appear on the command line, from lowest precedence to +# highest. + +default-CFLAGS := -DPREFIX=\"$(prefix)\" -Wall -Wno-unused -Wno-nonportable-include-path -g -O2 -fPIC +default-CXXFLAGS := $(default-CFLAGS) -std=c++17 + +mcppbs-CPPFLAGS := @CPPFLAGS@ +mcppbs-CFLAGS := $(default-CFLAGS) @CFLAGS@ +mcppbs-CXXFLAGS := $(default-CXXFLAGS) @CXXFLAGS@ + +CC := @CC@ +CXX := @CXX@ + +# These are the flags actually used for a C++ compile or a C compile. +# The language-specific flags come after the preprocessor flags, but +# user-supplied flags always take precedence. +all-cxx-flags := \ + $(mcppbs-CPPFLAGS) $(mcppbs-CXXFLAGS) $(CPPFLAGS) $(CXXFLAGS) +all-c-flags := \ + $(mcppbs-CPPFLAGS) $(mcppbs-CFLAGS) $(CPPFLAGS) $(CFLAGS) + +COMPILE := $(CXX) -MMD -MP $(all-cxx-flags) $(sprojs_include) @BOOST_CPPFLAGS@ +COMPILE_C := $(CC) -MMD -MP $(all-c-flags) $(sprojs_include) + +# Linker +# - LDFLAGS : Flags for the linker (eg. -L) +# - LIBS : Library flags (eg. -l) + +mcppbs-LDFLAGS := @LDFLAGS@ @BOOST_LDFLAGS@ +all-link-flags := $(mcppbs-LDFLAGS) $(LDFLAGS) + +comma := , +LD := $(CXX) +LIBS := @LIBS@ @BOOST_ASIO_LIB@ @BOOST_REGEX_LIB@ +LINK := $(LD) -L.
$(all-link-flags) -Wl,-rpath,$(install_libs_dir) $(patsubst -L%,-Wl$(comma)-rpath$(comma)%,$(filter -L%,$(LDFLAGS))) + +# Library creation + +AR := @AR@ +RANLIB := @RANLIB@ + +# Host simulator + +RUN := @RUN@ +RUNFLAGS := @RUNFLAGS@ + +# Installation + +MKINSTALLDIRS := $(scripts_dir)/mk-install-dirs.sh +INSTALL := @INSTALL@ +INSTALL_HDR := $(INSTALL) -m 644 +INSTALL_LIB := $(INSTALL) -m 644 +INSTALL_EXE := $(INSTALL) -m 755 +STOW := @stow@ + +# Tests +bintests = $(src_dir)/tests/ebreak.py + +#------------------------------------------------------------------------- +# Include subproject makefile fragments +#------------------------------------------------------------------------- + +sprojs_mk = $(addsuffix .mk, $(sprojs_enabled)) + +-include $(sprojs_mk) + +dist_junk += $(sprojs_mk) + +#------------------------------------------------------------------------- +# Reverse list helper function +#------------------------------------------------------------------------- +# This function is used by the subproject template to reverse the list +# of dependencies. It uses recursion to perform the reversal. +# +# Arguments: +# $(1) : space separated input list +# retval : input list in reverse order +# + +reverse_list = $(call reverse_list_h,$(1),) +define reverse_list_h + $(if $(strip $(1)), \ + $(call reverse_list_h, \ + $(wordlist 2,$(words $(1)),$(1)), \ + $(firstword $(1)) $(2)), \ + $(2)) +endef + +#------------------------------------------------------------------------- +# Template for per subproject rules +#------------------------------------------------------------------------- +# The template is instantiated for each of the subprojects. It relies on +# subprojects defining a certain set of make variables which are all +# prefixed with the subproject name. Since subproject names can have +# dashes in them (and the make variables are assumed to only use +# underscores) the template takes two arguments - one with the regular +# subproject name and one with dashes replaced with underscores. +# +# Arguments: +# $(1) : real subproject name (ie with dashes) +# $(2) : normalized subproject name (ie dashes replaced with underscores) +# + +define subproject_template + +# In some (rare) cases, a subproject might not have any actual object +# files. It might only include header files or program sources. To keep +# things consistent we still want a library for this subproject, so in +# this special case we create a dummy source file and thus the build +# system will create a library for this subproject with just the +# corresponding dummy object file.
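+# +# As a concrete illustration (subproject name hypothetical): a header-only +# subproject named hdronly would get a generated _hdronly.cc defining +# int _hdronly( int arg ) { return arg; }, and the rules below compile it +# to _hdronly.o and archive it into libhdronly.a.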
+ +ifeq ($$(strip $$($(2)_srcs) $$($(2)_c_srcs)),) +$(2)_srcs += _$(1).cc +$(2)_junk += _$(1).cc +endif + +_$(1).cc : + echo "int _$(2)( int arg ) { return arg; }" > $$@ + +# Build the object files for this subproject + +$(2)_pch := $$(patsubst %.h, %.h.gch, $$($(2)_precompiled_hdrs)) +$(2)_objs := $$(patsubst %.cc, %.o, $$($(2)_srcs)) +$(2)_c_objs := $$(patsubst %.c, %.o, $$($(2)_c_srcs)) +$(2)_deps := $$(patsubst %.o, %.d, $$($(2)_objs)) +$(2)_deps += $$(patsubst %.o, %.d, $$($(2)_c_objs)) +$(2)_deps += $$(patsubst %.h, %.h.d, $$($(2)_precompiled_hdrs)) +$$($(2)_pch) : %.h.gch : %.h + $(COMPILE) -x c++-header $$< -o $$@ +$$($(2)_objs) : %.o : %.cc $$($(2)_gen_hdrs) $$($(2)_pch) + $(COMPILE) $(if $(HAVE_CLANG_PCH), $$(if $$($(2)_pch), -include-pch $$($(2)_pch))) $$($(2)_CFLAGS) -c $$< +$$($(2)_c_objs) : %.o : %.c $$($(2)_gen_hdrs) + $(COMPILE_C) $$($(2)_CFLAGS) -c $$< + +$(2)_junk += $$($(2)_pch) $$($(2)_objs) $$($(2)_c_objs) $$($(2)_deps) \ + $$($(2)_gen_hdrs) + +# Reverse the dependency list so that a given subproject only depends on +# subprojects listed to its right. This is the correct order for linking +# the list of subproject libraries. + +$(2)_reverse_deps := $$(call reverse_list,$$($(2)_subproject_deps)) + +# Build a library for this subproject + +$(2)_lib_libs := $$($(2)_reverse_deps) +$(2)_lib_libnames := $$(patsubst %, lib%.a, $$($(2)_lib_libs)) +$(2)_lib_libarg := $$(patsubst %, -l%, $$($(2)_lib_libs)) +$(2)_lib_libnames_shared := $$(if $$($(2)_install_shared_lib),lib$(1).so,) + +lib$(1).a : $$($(2)_objs) $$($(2)_c_objs) $$($(2)_lib_libnames) + $(AR) rcs $$@ $$^ +lib$(1).so : $$($(2)_objs) $$($(2)_c_objs) $$($(2)_lib_libnames_shared) $$($(2)_lib_libnames) + $(LINK) -shared -o $$@ $(if $(filter Darwin,$(shell uname -s)),-install_name $(install_libs_dir)/$$@) $$^ $$($(2)_lib_libnames) $(LIBS) + +$(2)_junk += lib$(1).a +$(2)_junk += $$(if $$($(2)_install_shared_lib),lib$(1).so,) + +# Build unit tests + +$(2)_test_objs := $$(patsubst %.cc, %.o, $$($(2)_test_srcs)) +$(2)_test_deps := $$(patsubst %.o, %.d, $$($(2)_test_objs)) +$(2)_test_exes := $$(patsubst %.t.cc, %-utst, $$($(2)_test_srcs)) +$(2)_test_outs := $$(patsubst %, %.out, $$($(2)_test_exes)) +$(2)_test_libs := $(1) $$($(2)_reverse_deps) utst +$(2)_test_libnames := $$(patsubst %, lib%.a, $$($(2)_test_libs)) +$(2)_test_libarg := $$(patsubst %, -l%, $$($(2)_test_libs)) + +$$($(2)_test_objs) : %.o : %.cc + $(COMPILE) -c $$< + +$$($(2)_test_exes) : %-utst : %.t.o $$($(2)_test_libnames) + $(LINK) -o $$@ $$< $$($(2)_test_libnames) $(LIBS) + +$(2)_deps += $$($(2)_test_deps) +$(2)_junk += \ + $$($(2)_test_objs) $$($(2)_test_deps) \ + $$($(2)_test_exes) *.junk-dat + +# Run unit tests + +$$($(2)_test_outs) : %.out : % + $(RUN) $(RUNFLAGS) ./$$< default | tee $$@ + +$(2)_junk += $$($(2)_test_outs) + +# Build programs + +$(2)_prog_objs := $$(patsubst %.cc, %.o, $$($(2)_prog_srcs)) +$(2)_prog_deps := $$(patsubst %.o, %.d, $$($(2)_prog_objs)) +$(2)_prog_exes := $$(patsubst %.cc, %, $$($(2)_prog_srcs)) +$(2)_prog_libs := $(1) $$($(2)_reverse_deps) +$(2)_prog_libnames := $$(patsubst %, lib%.a, $$($(2)_prog_libs)) +$(2)_prog_libarg := $$(patsubst %, -l%, $$($(2)_prog_libs)) + +$$($(2)_prog_objs) : %.o : %.cc + $(COMPILE) -c $$< + +$$($(2)_prog_exes) : % : %.o $$($(2)_prog_libnames) + $(LINK) -o $$@ $$< $$($(2)_prog_libnames) $(LIBS) + +$(2)_deps += $$($(2)_prog_deps) +$(2)_junk += $$($(2)_prog_objs) $$($(2)_prog_deps) $$($(2)_prog_exes) + +# Build programs which will be installed + +$(2)_install_prog_objs := $$(patsubst %.cc, %.o, 
$$($(2)_install_prog_srcs)) +$(2)_install_prog_deps := $$(patsubst %.o, %.d, $$($(2)_install_prog_objs)) +$(2)_install_prog_exes := $$(patsubst %.cc, %, $$($(2)_install_prog_srcs)) + +$$($(2)_install_prog_objs) : %.o : %.cc $$($(2)_gen_hdrs) + $(COMPILE) -c $$< + +$$($(2)_install_prog_exes) : % : %.o $$($(2)_prog_libnames) + $(LINK) -o $$@ $$< $$($(2)_prog_libnames) $(LIBS) + +$(2)_deps += $$($(2)_install_prog_deps) +$(2)_junk += \ + $$($(2)_install_prog_objs) $$($(2)_install_prog_deps) \ + $$($(2)_install_prog_exes) + +# Subproject specific targets + +all-$(1) : lib$(1).a $$($(2)_install_prog_exes) + +check-$(1) : $$($(2)_test_outs) + echo; grep -h -e'Unit Tests' -e'FAILED' -e'Segmentation' $$^; echo + +clean-$(1) : + rm -rf $$($(2)_junk) + +.PHONY : all-$(1) check-$(1) clean-$(1) + +# Update running variables + +libs += lib$(1).a +objs += $$($(2)_objs) +srcs += $$(addprefix $(src_dir)/$(1)/, $$($(2)_srcs)) +hdrs += $$(addprefix $(src_dir)/$(1)/, $$($(2)_hdrs)) $$($(2)_gen_hdrs) +junk += $$($(2)_junk) +deps += $$($(2)_deps) + +test_outs += $$($(2)_test_outs) + +install_config_hdrs += $$(if $$($(2)_install_config_hdr),$(1),) +install_hdrs += $$(addprefix $(src_dir)/$(1)/, $$($(2)_install_hdrs)) +install_libs += $$(if $$($(2)_install_lib),lib$(1).a,) +install_libs += $$(if $$($(2)_install_shared_lib),lib$(1).so,) +install_exes += $$($(2)_install_prog_exes) +install_pcs += $$(if $$($(2)_install_lib),riscv-$(1).pc,) + +endef + +# Iterate over the subprojects and call the template for each one + +$(foreach sproj,$(sprojs_enabled), \ + $(eval $(call subproject_template,$(sproj),$(subst -,_,$(sproj))))) + +#------------------------------------------------------------------------- +# Autodependency files +#------------------------------------------------------------------------- + +-include $(deps) + +deps : $(deps) +.PHONY : deps + +#------------------------------------------------------------------------- +# Check +#------------------------------------------------------------------------- + +bintest_outs = $(bintests:=.out) +junk += $(bintest_outs) +%.out: % all + ./$* < /dev/null 2>&1 | tee $@ + +check-cpp : $(test_outs) + @echo + ! grep -h -e'Unit Tests' -e'FAILED' -e'Segmentation' $^ < /dev/null + @echo + +check-bin : $(bintest_outs) + !
tail -n 1 $^ < /dev/null 2>&1 | grep FAILED + +check : check-cpp check-bin + +.PHONY : check + +#------------------------------------------------------------------------- +# Installation +#------------------------------------------------------------------------- + +install-config-hdrs : config.h + $(MKINSTALLDIRS) $(install_hdrs_dir) + for dir in $(install_config_hdrs); \ + do \ + $(MKINSTALLDIRS) $(install_hdrs_dir)/$$dir; \ + $(INSTALL_HDR) $< $(install_hdrs_dir)/$$dir; \ + done + +install-hdrs : $(install_hdrs) + $(MKINSTALLDIRS) $(install_hdrs_dir) + for file in $(subst $(src_dir)/,,$^); \ + do \ + $(MKINSTALLDIRS) $(install_hdrs_dir)/`dirname $$file`; \ + $(INSTALL_HDR) $(src_dir)/$$file $(install_hdrs_dir)/`dirname $$file`; \ + done + +install-libs : $(install_libs) + $(MKINSTALLDIRS) $(install_libs_dir) + for file in $^; \ + do \ + $(INSTALL_LIB) $$file $(install_libs_dir); \ + done + +install-exes : $(install_exes) + $(MKINSTALLDIRS) $(install_exes_dir) + for file in $^; \ + do \ + $(INSTALL_EXE) $$file $(install_exes_dir); \ + done + +install-pc : $(install_pcs) + $(MKINSTALLDIRS) $(install_libs_dir)/pkgconfig/ + for file in $^; \ + do \ + $(INSTALL_HDR) $$file $(install_libs_dir)/pkgconfig/; \ + done + +install : install-hdrs install-config-hdrs install-libs install-exes install-pc + +.PHONY : install install-hdrs install-config-hdrs install-libs install-exes install-pc + +#------------------------------------------------------------------------- +# Regenerate configure information +#------------------------------------------------------------------------- + +config.status : $(src_dir)/configure + ./config.status --recheck + +sprojs_mk_in = \ + $(join $(addprefix $(src_dir)/, $(sprojs_enabled)), \ + $(patsubst %, /%.mk.in, $(sprojs_enabled))) + +Makefile : $(src_dir)/Makefile.in $(sprojs_mk_in) config.status + ./config.status + +dist_junk += config.status config.h Makefile config.log + +#------------------------------------------------------------------------- +# Distribution +#------------------------------------------------------------------------- +# The distribution tarball is named project-ver.tar.gz and it includes +# both enabled and disabled subprojects. + +dist_files = \ + $(sprojs) \ + README \ + style-guide.txt \ + mcppbs-uguide.txt \ + scripts \ + configure.ac \ + aclocal.m4 \ + configure \ + config.h.in \ + Makefile.in \ + +dist_dir := $(project_name)-$(project_ver) +dist_tgz := $(project_name)-$(project_ver).tar.gz + +# Notice that when we make the distribution we rewrite the configure.ac +# script with the current version and we rerun autoconf in the new +# source directory so that the distribution will have the proper version +# information. We also rewrite the "Version : " line in the README. + +dist : + rm -rf $(dist_dir) + mkdir $(dist_dir) + tar -C $(src_dir) -cf - $(dist_files) | tar -C $(dist_dir) -xpf - + sed -i.bak 's/^\(# Version :\).*/\1 $(project_ver)/' $(dist_dir)/README + sed -i.bak 's/\( proj_version,\).*/\1 [$(project_ver)])/' $(dist_dir)/configure.ac + cd $(dist_dir) && \ + autoconf && autoheader && \ + rm -rf autom4te.cache configure.ac.bak README.bak + tar -czvf $(dist_tgz) $(dist_dir) + rm -rf $(dist_dir) + +# You can use the distcheck target to try untarring the distribution and +# then running configure, make, make check, and make distclean. A +# "directory is not empty" error means distclean is not removing +# everything.
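+# +# Typical usage (illustrative): run "make dist" to roll the tarball, then +# "make distcheck" to confirm that an untarred copy can configure, build, +# pass "make check", and "make distclean" in a scratch build directory.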
+ +distcheck : dist + rm -rf $(dist_dir) + tar -xzvf $(dist_tgz) + mkdir -p $(dist_dir)/build + cd $(dist_dir)/build; ../configure; make; make check; make distclean + rm -rf $(dist_dir) + +junk += $(project_name)-*.tar.gz + +.PHONY : dist distcheck + +#------------------------------------------------------------------------- +# Default +#------------------------------------------------------------------------- + +all : $(install_hdrs) $(install_libs) $(install_exes) +.PHONY : all + +#------------------------------------------------------------------------- +# Makefile debugging +#------------------------------------------------------------------------- +# This handy rule will display the contents of any make variable by +# using the target debug-<varname>. So for example, make debug-junk will +# display the contents of the junk variable. + +debug-% : + @echo $* = $($*) + +#------------------------------------------------------------------------- +# Clean up junk +#------------------------------------------------------------------------- + +clean : + rm -rf *~ \#* $(junk) + +distclean : + rm -rf *~ \#* $(junk) $(dist_junk) + +.PHONY : clean distclean diff --git a/vendor/riscv-isa-sim/README.md b/vendor/riscv-isa-sim/README.md new file mode 100644 index 00000000..6da9fab9 --- /dev/null +++ b/vendor/riscv-isa-sim/README.md @@ -0,0 +1,300 @@ +Spike RISC-V ISA Simulator +============================ + +About +------------- + +Spike, the RISC-V ISA Simulator, implements a functional model of one or more +RISC-V harts. It is named after the golden spike used to celebrate the +completion of the US transcontinental railway. + +Spike supports the following RISC-V ISA features: + - RV32I and RV64I base ISAs, v2.1 + - RV32E and RV64E base ISAs, v1.9 + - Zifencei extension, v2.0 + - Zicsr extension, v2.0 + - M extension, v2.0 + - A extension, v2.1 + - F extension, v2.2 + - D extension, v2.2 + - Q extension, v2.2 + - C extension, v2.0 + - Zbkb, Zbkc, Zbkx, Zknd, Zkne, Zknh, Zksed, Zksh scalar cryptography extensions (Zk, Zkn, and Zks groups), v1.0 + - Zkr virtual entropy source emulation, v1.0 + - V extension, v1.0 (_requires a 64-bit host_) + - P extension, v0.9.2 + - Zba extension, v1.0 + - Zbb extension, v1.0 + - Zbc extension, v1.0 + - Zbs extension, v1.0 + - Conformance to both RVWMO and RVTSO (Spike is sequentially consistent) + - Machine, Supervisor, and User modes, v1.11 + - Hypervisor extension, v1.0 + - Svnapot extension, v1.0 + - Svpbmt extension, v1.0 + - Svinval extension, v1.0 + - CMO extension, v1.0 + - Debug v0.14 + +As a Spike extension, the remainder of the proposed +[Bit-Manipulation Extensions](https://github.com/riscv/riscv-bitmanip) +is provided under the Spike-custom extension name _Xbitmanip_. +These instructions (and, of course, the extension name) are not RISC-V +standards. + +These proposed bit-manipulation extensions can be split into further +groups: Zbp, Zbs, Zbe, Zbf, Zbc, Zbm, Zbr, Zbt. Note that Zbc is +ratified, but the original proposal contained some extra instructions +(64-bit carryless multiplies) which are captured here. + +To enable these extensions individually, use the Spike-custom +extension names _XZbp_, _XZbs_, _XZbc_, and so on. + +Versioning and APIs +------------------- + +Projects are versioned primarily to indicate when the API has been extended or +rendered incompatible.
In that spirit, Spike aims to follow the +[SemVer](https://semver.org/spec/v2.0.0.html) versioning scheme, in which +major version numbers are incremented when backwards-incompatible API changes +are made; minor version numbers are incremented when new APIs are added; and +patch version numbers are incremented when bugs are fixed in +a backwards-compatible manner. + +Spike's principal public API is the RISC-V ISA. _The C++ interface to Spike's +internals is **not** considered a public API at this time_, and +backwards-incompatible changes to this interface _will_ be made without +incrementing the major version number. + +Build Steps +--------------- + +We assume that the RISCV environment variable is set to the RISC-V tools +install path. + + $ apt-get install device-tree-compiler + $ mkdir build + $ cd build + $ ../configure --prefix=$RISCV + $ make + $ [sudo] make install + +If your system uses the `yum` package manager, you can substitute +`yum install dtc` for the first step. + +Build Steps on OpenBSD +---------------------- + +Install bash, gmake, dtc, and use clang. + + $ pkg_add bash gmake dtc + $ exec bash + $ export CC=cc; export CXX=c++ + $ mkdir build + $ cd build + $ ../configure --prefix=$RISCV + $ gmake + $ [doas] make install + +Compiling and Running a Simple C Program +------------------------------------------- + +Install spike (see Build Steps), riscv-gnu-toolchain, and riscv-pk. + +Write a short C program and name it hello.c. Then, compile it into a RISC-V +ELF binary named hello: + + $ riscv64-unknown-elf-gcc -o hello hello.c + +Now you can simulate the program atop the proxy kernel: + + $ spike pk hello + +Simulating a New Instruction +------------------------------------ + +Adding an instruction to the simulator requires three steps: + + 1. Describe the instruction's functional behavior in the file + riscv/insns/<new_instruction_name>.h. Examine other instructions + in that directory as a starting point. + + 2. Add the opcode and opcode mask to riscv/opcodes.h. Alternatively, + add it to the riscv-opcodes package, and it will do so for you: + ``` + $ cd ../riscv-opcodes + $ vi opcodes // add a line for the new instruction + $ make install + ``` + + 3. Rebuild the simulator. + +Interactive Debug Mode +--------------------------- + +To invoke interactive debug mode, launch spike with -d: + + $ spike -d pk hello + +To see the contents of an integer register (0 is for core 0): + + : reg 0 a0 + +To see the contents of a floating point register: + + : fregs 0 ft0 + +or: + + : fregd 0 ft0 + +depending upon whether you wish to print the register as single- or double-precision. + +To see the contents of a memory location (physical address in hex): + + : mem 2020 + +To see the contents of memory with a virtual address (0 for core 0): + + : mem 0 2020 + +You can advance by one instruction by pressing the enter key. You can also +execute until a desired equality is reached: + + : until pc 0 2020 (stop when pc=2020) + : until reg 0 mie a (stop when register mie=0xa) + : until mem 2020 50a9907311096993 (stop when mem[2020]=50a9907311096993) + +Alternatively, you can execute as long as an equality is true: + + : while mem 2020 50a9907311096993 + +You can continue execution indefinitely by: + + : r + +At any point during execution (even without -d), you can enter the +interactive debug mode with `<control>-<c>`. + +To end the simulation from the debug prompt, press `<control>-<c>` or: + + : q + +Debugging With Gdb +------------------ + +An alternative to interactive debug mode is to attach using gdb.
Because spike +tries to be like real hardware, you also need OpenOCD to do that. OpenOCD +doesn't currently know about address translation, so it's not possible to +easily debug programs that are run under `pk`. We'll use the following test +program: +``` +$ cat rot13.c +char text[] = "Vafgehpgvba frgf jnag gb or serr!"; + +// Don't use the stack, because sp isn't set up. +volatile int wait = 1; + +int main() +{ + while (wait) + ; + + // Doesn't actually go on the stack, because there are lots of GPRs. + int i = 0; + while (text[i]) { + char lower = text[i] | 32; + if (lower >= 'a' && lower <= 'm') + text[i] += 13; + else if (lower > 'm' && lower <= 'z') + text[i] -= 13; + i++; + } + +done: + while (!wait) + ; +} +$ cat spike.lds +OUTPUT_ARCH( "riscv" ) + +SECTIONS +{ + . = 0x10010000; + .text : { *(.text) } + .data : { *(.data) } +} +$ riscv64-unknown-elf-gcc -g -Og -o rot13-64.o -c rot13.c +$ riscv64-unknown-elf-gcc -g -Og -T spike.lds -nostartfiles -o rot13-64 rot13-64.o +``` + +To debug this program, first run spike telling it to listen for OpenOCD: +``` +$ spike --rbb-port=9824 -m0x10000000:0x20000 rot13-64 +Listening for remote bitbang connection on port 9824. +``` + +In a separate shell run OpenOCD with the appropriate configuration file: +``` +$ cat spike.cfg +interface remote_bitbang +remote_bitbang_host localhost +remote_bitbang_port 9824 + +set _CHIPNAME riscv +jtag newtap $_CHIPNAME cpu -irlen 5 -expected-id 0x10e31913 + +set _TARGETNAME $_CHIPNAME.cpu +target create $_TARGETNAME riscv -chain-position $_TARGETNAME + +gdb_report_data_abort enable + +init +halt +$ openocd -f spike.cfg +Open On-Chip Debugger 0.10.0-dev-00002-gc3b344d (2017-06-08-12:14) +... +riscv.cpu: target state: halted +``` + +In yet another shell, start your gdb debug session: +``` +tnewsome@compy-vm:~/SiFive/spike-test$ riscv64-unknown-elf-gdb rot13-64 +GNU gdb (GDB) 8.0.50.20170724-git +Copyright (C) 2017 Free Software Foundation, Inc. +License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html> +This is free software: you are free to change and redistribute it. +There is NO WARRANTY, to the extent permitted by law. Type "show copying" +and "show warranty" for details. +This GDB was configured as "--host=x86_64-pc-linux-gnu --target=riscv64-unknown-elf". +Type "show configuration" for configuration details. +For bug reporting instructions, please see: +<http://www.gnu.org/software/gdb/bugs/>. +Find the GDB manual and other documentation resources online at: +<http://www.gnu.org/software/gdb/documentation/>. +For help, type "help". +Type "apropos word" to search for commands related to "word"... +Reading symbols from rot13-64...done. +(gdb) target remote localhost:3333 +Remote debugging using localhost:3333 +0x0000000010010004 in main () at rot13.c:8 +8 while (wait) +(gdb) print wait +$1 = 1 +(gdb) print wait=0 +$2 = 0 +(gdb) print text +$3 = "Vafgehpgvba frgf jnag gb or serr!" +(gdb) b done +Breakpoint 1 at 0x10010064: file rot13.c, line 22. +(gdb) c +Continuing. +Disabling abstract command writes to CSRs. + +Breakpoint 1, main () at rot13.c:23 +23 while (!wait) +(gdb) print wait +$4 = 0 +(gdb) print text +...
+``` diff --git a/vendor/riscv-isa-sim/VERSION b/vendor/riscv-isa-sim/VERSION new file mode 100644 index 00000000..6ce2a755 --- /dev/null +++ b/vendor/riscv-isa-sim/VERSION @@ -0,0 +1 @@ +#define SPIKE_VERSION "1.1.1-dev" diff --git a/vendor/riscv-isa-sim/aclocal.m4 b/vendor/riscv-isa-sim/aclocal.m4 new file mode 100644 index 00000000..def74dba --- /dev/null +++ b/vendor/riscv-isa-sim/aclocal.m4 @@ -0,0 +1,302 @@ +#========================================================================= +# Local Autoconf Macros +#========================================================================= +# This file contains the macros for the Modular C++ Build System and +# additional autoconf macros which developers can use in their +# configure.ac scripts. Please read the documentation in +# 'mcppbs-doc.txt' for more details on how the Modular C++ Build System +# works. The documentation for each macro should include information +# about the author, date, and copyright. + +#------------------------------------------------------------------------- +# MCPPBS_PROG_INSTALL +#------------------------------------------------------------------------- +# This macro will add an --enable-stow command line option to the +# configure script. When enabled, this macro will first check to see if +# the stow program is available and if so it will set the $stow shell +# variable to the binary name and the $enable_stow shell variable to +# "yes". These variables can be used in a makefile to conditionally use +# stow for installation. +# +# This macro uses two environment variables to help set up default stow +# locations. The $STOW_PREFIX is used for stowing native built packages. +# The packages are staged in $STOW_PREFIX/pkgs and then symlinks are +# created from within $STOW_PREFIX into the pkgs subdirectory. If you +# only do native builds then this is all you need to set. If you don't +# set $STOW_PREFIX then the default is just the normal default prefix +# which is almost always /usr/local. +# +# For non-native builds we probably want to install the packages in a +# different location which includes the host architecture name as part +# of the prefix. For these kinds of builds, we can specify the $STOW_ROOT +# environment variable and the effective prefix will be +# $STOW_ROOT/${host_alias} where ${host_alias} is specified on the +# configure command line with "--host". +# +# Here is an example setup: +# +# STOW_ROOT="$HOME/install" +# STOW_ARCH="i386-macosx10.4" +# STOW_PREFIX="${STOW_ROOT}/${STOW_ARCH}" +# + +AC_DEFUN([MCPPBS_PROG_INSTALL], +[ + + # Configure command line option + + AC_ARG_ENABLE(stow, + AS_HELP_STRING(--enable-stow,[Enable stow-based install]), + [enable_stow="yes"],[enable_stow="no"]) + + AC_SUBST([enable_stow]) + + # Environment variables + + AC_ARG_VAR([STOW_ROOT], [Root for non-native stow-based installs]) + AC_ARG_VAR([STOW_PREFIX], [Prefix for stow-based installs]) + + # Check for install script + + AC_PROG_INSTALL +]) + +#------------------------------------------------------------------------- +# MCPPBS_PROG_RUN +# ------------------------------------------------------------------------- +# If we are doing a non-native build then we look for an isa simulator +# to use for running tests. We set the RUN substitution variable to be +# empty for native builds or to the name of the isa simulator for +# non-native builds.
Thus a makefile can run compiled programs +# regardless of whether we are doing a native or non-native build like this: +# +# $(RUN) $(RUNFLAGS) ./test-program +# + +AC_DEFUN([MCPPBS_PROG_RUN], +[ + AS_IF([ test "${build}" != "${host}" ], + [ + AC_CHECK_TOOLS([RUN],[isa-run run],[no]) + AS_IF([ test ${RUN} = "no" ], + [ + AC_MSG_ERROR([Cannot find simulator for target ${target_alias}]) + ]) + ],[ + RUN="" + ]) + AC_SUBST([RUN]) + AC_SUBST([RUNFLAGS]) +]) + +#------------------------------------------------------------------------- +# MCPPBS_SUBPROJECTS([ sproj1, sproj2, ... ]) +#------------------------------------------------------------------------- +# The developer should call this macro with a list of the subprojects +# which make up this project. One should order the list such that any +# given subproject only depends on subprojects listed before it. The +# subproject names can also include an * suffix which indicates that +# this is an optional subproject. Optional subprojects are only included +# as part of the project build if enabled on the configure command line +# with a --enable-<subproject> flag. The user can also specify that all +# optional subprojects should be included in the build with the +# --enable-optional-subprojects flag. +# +# Subproject names can also include a ** suffix which indicates that it +# is an optional subproject, but there is a group with the same name. +# Thus the --enable-<sproj> command line option will enable not just the +# subproject sproj but all of the subprojects which are in the group. +# There is no error checking to make sure that if you use the ** suffix +# you actually define a group, so be careful. +# +# Both required and optional subprojects should have a 'subproject.ac' +# file. The script's filename should be the abbreviated subproject name +# (assuming the subproject name is sproj then we would use 'sproj.ac') +# The MCPPBS_SUBPROJECTS macro includes the 'subproject.ac' files for +# enabled subprojects. Whitespace and newlines are allowed within the +# list. +# +# Author : Christopher Batten +# Date : September 10, 2008 + +AC_DEFUN([MCPPBS_SUBPROJECTS], +[ + + # Add command line argument to enable all optional subprojects + + AC_ARG_ENABLE(optional-subprojects, + AS_HELP_STRING([--enable-optional-subprojects], + [Enable all optional subprojects])) + + # Loop through the subprojects given in the macro argument + + m4_foreach([MCPPBS_SPROJ],[$1], + [ + + # Determine if this is a required or an optional subproject + + m4_define([MCPPBS_IS_REQ], + m4_bmatch(MCPPBS_SPROJ,[\*+],[false],[true])) + + # Determine if there is a group with the same name + + m4_define([MCPPBS_IS_GROUP], + m4_bmatch(MCPPBS_SPROJ,[\*\*],[true],[false])) + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + m4_define([MCPPBS_SPROJ_NORM], + m4_normalize(m4_bpatsubsts(MCPPBS_SPROJ,[*],[]))) + + m4_define([MCPPBS_SPROJ_DEFINE], + m4_toupper(m4_bpatsubst(MCPPBS_SPROJ_NORM[]_ENABLED,[-],[_]))) + + m4_define([MCPPBS_SPROJ_FUNC], + m4_bpatsubst(_mpbp_[]MCPPBS_SPROJ_NORM[]_configure,[-],[_])) + + m4_define([MCPPBS_SPROJ_UNDERSCORES], + m4_bpatsubsts(MCPPBS_SPROJ,[-],[_])) + + m4_define([MCPPBS_SPROJ_SHVAR], + m4_bpatsubst(enable_[]MCPPBS_SPROJ_NORM[]_sproj,[-],[_])) + + # Add subproject to our running list + + subprojects="$subprojects MCPPBS_SPROJ_NORM" + + # Process the subproject appropriately.
If enabled, add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + m4_if(MCPPBS_IS_REQ,[true], + [ + AC_MSG_NOTICE([configuring default subproject : MCPPBS_SPROJ_NORM]) + AC_CONFIG_FILES(MCPPBS_SPROJ_NORM[].mk:MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].mk.in) + MCPPBS_SPROJ_SHVAR="yes" + subprojects_enabled="$subprojects_enabled MCPPBS_SPROJ_NORM" + AC_DEFINE(MCPPBS_SPROJ_DEFINE,, + [Define if subproject MCPPBS_SPROJ_NORM is enabled]) + m4_include(MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].ac) + ],[ + + # For optional subprojects we capture the 'subproject.ac' as a + # shell function so that in the MCPPBS_GROUP macro we can just + # call this shell function instead of reading in 'subproject.ac' + # again. + + MCPPBS_SPROJ_FUNC () + { + AC_MSG_NOTICE([configuring optional subproject : MCPPBS_SPROJ_NORM]) + AC_CONFIG_FILES(MCPPBS_SPROJ_NORM[].mk:MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].mk.in) + MCPPBS_SPROJ_SHVAR="yes" + subprojects_enabled="$subprojects_enabled MCPPBS_SPROJ_NORM" + AC_DEFINE(MCPPBS_SPROJ_DEFINE,, + [Define if subproject MCPPBS_SPROJ_NORM is enabled]) + m4_include(MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].ac) + }; + + # Optional subprojects add --enable-subproject command line + # options, _if_ the subproject name is not also a group name. + + m4_if(MCPPBS_IS_GROUP,[false], + [ + AC_ARG_ENABLE(MCPPBS_SPROJ_NORM, + AS_HELP_STRING(--enable-MCPPBS_SPROJ_NORM, + [Subproject MCPPBS_SPROJ_NORM]), + [MCPPBS_SPROJ_SHVAR="yes"],[MCPPBS_SPROJ_SHVAR="no"]) + + AS_IF([test "$MCPPBS_SPROJ_SHVAR" = "yes"], + [ + eval "MCPPBS_SPROJ_FUNC" + ],[ + AC_MSG_NOTICE([processing optional subproject : MCPPBS_SPROJ_NORM]) + ]) + + ],[ + + # If the subproject name is also a group name then we need to + # make sure that we set the shell variable for that subproject to + # no so that the group code knows we haven't run it yet. + + AC_MSG_NOTICE([processing optional subproject : MCPPBS_SPROJ_NORM]) + MCPPBS_SPROJ_SHVAR="no" + + ]) + + # Always execute the subproject configure code if we are enabling + # all subprojects. + + AS_IF([ test "$enable_optional_subprojects" = "yes" \ + && test "$MCPPBS_SPROJ_SHVAR" = "no" ], + [ + eval "MCPPBS_SPROJ_FUNC" + ]) + + ]) + + ]) + + # Output make variables + + AC_SUBST([subprojects]) + AC_SUBST([subprojects_enabled]) + +]) + +#------------------------------------------------------------------------- +# MCPPBS_GROUP( [group-name], [ sproj1, sproj2, ... ] ) +#------------------------------------------------------------------------- +# This macro creates a subproject group with the given group-name. When +# a user specifies --enable-<group-name> the listed subprojects will be +# enabled. Groups can have the same name as a subproject and in that +# case whenever a user specifies --enable-<group-name> the subprojects +# listed in the corresponding group will also be enabled. Groups are +# useful for specifying related subprojects which are usually enabled +# together, as well as for specifying that a specific optional +# subproject has dependencies on other optional subprojects.
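+# +# For example (subproject names hypothetical): +# +# MCPPBS_GROUP( [bitmanip], [ zbp, zbs ] ) +# +# would make --enable-bitmanip enable both the zbp and zbs subprojects.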
+# +# Author : Christopher Batten +# Date : September 10, 2008 + +AC_DEFUN([MCPPBS_GROUP], +[ + + m4_define([MCPPBS_GROUP_NORM], + m4_normalize([$1])) + + m4_define([MCPPBS_GROUP_SHVAR], + m4_bpatsubst(enable_[]MCPPBS_GROUP_NORM[]_group,[-],[_])) + + AC_ARG_ENABLE(MCPPBS_GROUP_NORM, + AS_HELP_STRING(--enable-MCPPBS_GROUP_NORM, + [Group MCPPBS_GROUP_NORM: $2]), + [MCPPBS_GROUP_SHVAR="yes"],[MCPPBS_GROUP_SHVAR="no"]) + + AS_IF([test "$MCPPBS_GROUP_SHVAR" = "yes" ], + [ + AC_MSG_NOTICE([configuring optional group : MCPPBS_GROUP_NORM]) + ]) + + m4_foreach([MCPPBS_SPROJ],[$2], + [ + + m4_define([MCPPBS_SPROJ_NORM], + m4_normalize(MCPPBS_SPROJ)) + + m4_define([MCPPBS_SPROJ_SHVAR], + m4_bpatsubst(enable_[]MCPPBS_SPROJ_NORM[]_sproj,[-],[_])) + + m4_define([MCPPBS_SPROJ_FUNC], + m4_bpatsubst(_mpbp_[]MCPPBS_SPROJ_NORM[]_configure,[-],[_])) + + AS_IF([ test "$MCPPBS_GROUP_SHVAR" = "yes" \ + && test "$MCPPBS_SPROJ_SHVAR" = "no" ], + [ + eval "MCPPBS_SPROJ_FUNC" + ]) + + ]) + +]) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include new file mode 100644 index 00000000..13eacdc1 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include @@ -0,0 +1,25 @@ +# set TARGETDIR to point to the directory which contains a sub-folder with the same name as the target +export TARGETDIR ?= /scratch/git-repo/github/neel/riscv-isa-sim/arch_test_target + +# set XLEN to max supported XLEN. Allowed values are 32 and 64 +export XLEN ?= 64 + +# name of the target. Note a folder of the same name must exist in the TARGETDIR directory +export RISCV_TARGET ?= spike + +# set the RISCV_DEVICE environment to a single extension you want to compile, simulate and/or verify. +# Leave this blank if you want to iterate through all the supported extensions available in the target +export RISCV_DEVICE ?= + +# set this to a string which needs to be passed to your target Makefile.include files +export RISCV_TARGET_FLAGS ?= + +# set this if you want to enable assertions on the test-suites. Currently no tests support +# assertions. +export RISCV_ASSERT ?= 0 + +# set the number of parallel jobs (along with any other arguments) you would like to execute. Note that the target needs to ensure +# that no common files across jobs are created/overwritten leading to unknown behavior +JOBS= -j1 + + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/README.md b/vendor/riscv-isa-sim/arch_test_target/spike/README.md new file mode 100644 index 00000000..56af2492 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/README.md @@ -0,0 +1,58 @@ +# Using the Spike Simulator as an Architectural test model + +This is a reference for running Spike as a target for the RISC-V Architectural Test framework. + +## Getting Spike + +The Spike repository should be cloned from [here](https://github.com/riscv/riscv-isa-sim/), preferably at the same directory level as the riscv-arch-test repository. + +## Building Spike + +The [README.md](../README.md) at the top level of the riscv-isa-sim directory gives details on building an executable spike model. + +## Adding Spike as a target to the Architectural Test framework + +Also at the top level is an ``arch_test_target`` directory. This directory contains all the collateral +required to add Spike as a target to the architectural test framework.
The file ``arch_test_target/spike/Makefile.include`` contains various parameters which can be set by +the user to modify the instance of spike on which the tests need to be run. +The user can modify the ``XLEN`` variable based on whether 32-bit or 64-bit tests need to be run. +If one would like to run tests of a single extension then set the ``RISCV_DEVICE`` to that extension +name (e.g. M, C, Zifencei, etc.). Leaving the ``RISCV_DEVICE`` empty would indicate running all tests +for all extensions available in the ``device/rv{XLEN}i_m`` directory. No other variables should be modified. + +Now clone the architectural test framework repo and copy the updated Makefile.include to it: + +``` + $ git clone https://github.com/riscv/riscv-arch-test.git + $ cd riscv-arch-test + $ cp <path-to>/riscv-isa-sim/arch_test_target/spike/Makefile.include . +``` + +The user will have to modify the ``TARGETDIR`` variable in ``riscv-arch-test/Makefile.include`` to point to the +absolute location of the ``riscv-isa-sim/arch_test_target`` directory. + +You can execute the tests from the root directory of the riscv-arch-test repo: + +``` +make compile simulate verify +``` + +## Updating the target for new tests + +As tests for new extensions are added to the architectural test repo, the spike target (i.e. +arch_test_target directory) will also need to be updated accordingly. Please refer to the [Porting a new target](https://github.com/riscv/riscv-arch-test/blob/master/doc/README.adoc#5-porting-a-new-target) +section for more details on what those changes/updates should be. + + + + + + + + + + + + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc b/vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc new file mode 100644 index 00000000..c43222de --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc @@ -0,0 +1,34 @@ +TARGET_SIM ?= spike +TARGET_FLAGS ?= $(RISCV_TARGET_FLAGS) +ifeq ($(shell command -v $(TARGET_SIM) 2> /dev/null),) + $(error Target simulator executable '$(TARGET_SIM)' not found) +endif + +RISCV_PREFIX ?= riscv$(XLEN)-unknown-elf- +RISCV_GCC ?= $(RISCV_PREFIX)gcc +RISCV_OBJDUMP ?= $(RISCV_PREFIX)objdump +RISCV_GCC_OPTS ?= -g -static -mcmodel=medany -fvisibility=hidden -nostdlib -nostartfiles $(RVTEST_DEFINES) + +COMPILE_CMD = $$(RISCV_GCC) $(1) $$(RISCV_GCC_OPTS) \ + -I$(ROOTDIR)/riscv-test-suite/env/ \ + -I$(TARGETDIR)/$(RISCV_TARGET)/ \ + -T$(TARGETDIR)/$(RISCV_TARGET)/link.ld \ + $$(<) -o $$@ +OBJ_CMD = $$(RISCV_OBJDUMP) $$@ -D > $$@.objdump; \ + $$(RISCV_OBJDUMP) $$@ --source > $$@.debug + + +COMPILE_TARGET=\ + $(COMPILE_CMD); \ + if [ $$$$? -ne 0 ] ; \ + then \ + echo "\e[31m$$(RISCV_GCC) failed for target $$(@) \e[39m" ; \ + exit 1 ; \ + fi ; \ + $(OBJ_CMD); \ + if [ $$$$?
-ne 0 ] ; \ + then \ + echo "\e[31m $$(RISCV_OBJDUMP) failed for target $$(@) \e[39m" ; \ + exit 1 ; \ + fi ; + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include new file mode 100644 index 00000000..daf0f434 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32ec \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include new file mode 100644 index 00000000..548b17d7 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32e \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include new file mode 100644 index 00000000..749c7fc2 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32em \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include new file mode 100644 index 00000000..346feaae --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include new file mode 100644 index 00000000..4fb87c62 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32if \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include new file mode 100644 index 00000000..740755c0 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include 
b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include new file mode 100644 index 00000000..5d8de47c --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32im \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include new file mode 100644 index 00000000..740755c0 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include new file mode 100644 index 00000000..8275495d --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include new file mode 100644 index 00000000..e6ca9fb3 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include new file mode 100644 index 00000000..26113946 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include @@ -0,0 +1,8 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64ifd \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include new file mode 100644 index 00000000..2c763bf5 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include new file mode 100644 index 00000000..8ce555c6 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include @@ -0,0 +1,8 @@ +include 
$(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64im \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include new file mode 100644 index 00000000..2c763bf5 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include new file mode 100644 index 00000000..5ef2802f --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/link.ld b/vendor/riscv-isa-sim/arch_test_target/spike/link.ld new file mode 100644 index 00000000..8ad95e04 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/link.ld @@ -0,0 +1,18 @@ +OUTPUT_ARCH( "riscv" ) +ENTRY(rvtest_entry_point) + +SECTIONS +{ + . = 0x80000000; + .text.init : { *(.text.init) } + . = ALIGN(0x1000); + .tohost : { *(.tohost) } + . = ALIGN(0x1000); + .text : { *(.text) } + . 
= ALIGN(0x1000); + .data : { *(.data) } + .data.string : { *(.data.string)} + .bss : { *(.bss) } + _end = .; +} + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/model_test.h b/vendor/riscv-isa-sim/arch_test_target/spike/model_test.h new file mode 100644 index 00000000..46a66017 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/model_test.h @@ -0,0 +1,70 @@ +#include "ibex_macros.h" + +#ifndef _COMPLIANCE_MODEL_H +#define _COMPLIANCE_MODEL_H + +#if XLEN == 64 + #define ALIGNMENT 3 +#else + #define ALIGNMENT 2 +#endif + +#define RVMODEL_DATA_SECTION \ + .pushsection .tohost,"aw",@progbits; \ + .align 8; .global tohost; tohost: .dword 0; \ + .align 8; .global fromhost; fromhost: .dword 0; \ + .popsection; \ + .align 8; .global begin_regstate; begin_regstate: \ + .word 128; \ + .align 8; .global end_regstate; end_regstate: \ + .word 4; + +//RV_COMPLIANCE_HALT +#define RVMODEL_HALT \ + fence; \ + li x2, SIGNATURE_ADDR; \ + li x1, (FINISHED_IRQ << 8) | CORE_STATUS; \ + sw x1, 0(x2); \ + li x1, (TEST_PASS << 8) | TEST_RESULT; \ + sw x1, 0(x2); \ + self_loop: j self_loop; + +#define RVMODEL_BOOT + +//RV_COMPLIANCE_DATA_BEGIN +#define RVMODEL_DATA_BEGIN \ + .align 4; .global begin_signature; begin_signature: + +//RV_COMPLIANCE_DATA_END +#define RVMODEL_DATA_END \ + .align 4; .global end_signature; end_signature: \ + RVMODEL_DATA_SECTION \ + +//RVTEST_IO_INIT +#define RVMODEL_IO_INIT +//RVTEST_IO_WRITE_STR +#define RVMODEL_IO_WRITE_STR(_R, _STR) +//RVTEST_IO_CHECK +#define RVMODEL_IO_CHECK() +//RVTEST_IO_ASSERT_GPR_EQ +#define RVMODEL_IO_ASSERT_GPR_EQ(_S, _R, _I) +//RVTEST_IO_ASSERT_SFPR_EQ +#define RVMODEL_IO_ASSERT_SFPR_EQ(_F, _R, _I) +//RVTEST_IO_ASSERT_DFPR_EQ +#define RVMODEL_IO_ASSERT_DFPR_EQ(_D, _R, _I) + +#define RVMODEL_SET_MSW_INT \ + li t1, 1; \ + li t2, 0x2000000; \ + sw t1, 0(t2); + +#define RVMODEL_CLEAR_MSW_INT \ + li t2, 0x2000000; \ + sw x0, 0(t2); + +#define RVMODEL_CLEAR_MTIMER_INT + +#define RVMODEL_CLEAR_MEXT_INT + +#endif // _COMPLIANCE_MODEL_H + diff --git a/vendor/riscv-isa-sim/ax_append_flag.m4 b/vendor/riscv-isa-sim/ax_append_flag.m4 new file mode 100644 index 00000000..dd6d8b61 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_append_flag.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
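+#
+# EXAMPLE
+#
+#   A hypothetical configure.ac fragment appending a warning flag to CFLAGS
+#   (the flag is only added if CFLAGS does not already contain it):
+#
+#     AX_APPEND_FLAG([-Wall], [CFLAGS])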
+ +#serial 8 + +AC_DEFUN([AX_APPEND_FLAG], +[dnl +AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_SET_IF +AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])]) +AS_VAR_SET_IF(FLAGS,[ + AS_CASE([" AS_VAR_GET(FLAGS) "], + [*" $1 "*], [AC_RUN_LOG([: FLAGS already contains $1])], + [ + AS_VAR_APPEND(FLAGS,[" $1"]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) + ], + [ + AS_VAR_SET(FLAGS,[$1]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) +AS_VAR_POPDEF([FLAGS])dnl +])dnl AX_APPEND_FLAG diff --git a/vendor/riscv-isa-sim/ax_append_link_flags.m4 b/vendor/riscv-isa-sim/ax_append_link_flags.m4 new file mode 100644 index 00000000..99b9fa5b --- /dev/null +++ b/vendor/riscv-isa-sim/ax_append_link_flags.m4 @@ -0,0 +1,44 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_link_flags.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_LINK_FLAGS([FLAG1 FLAG2 ...], [FLAGS-VARIABLE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# For every FLAG1, FLAG2 it is checked whether the linker works with the +# flag. If it does, the flag is added FLAGS-VARIABLE +# +# If FLAGS-VARIABLE is not specified, the linker's flags (LDFLAGS) is +# used. During the check the flag is always added to the linker's flags. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: This macro depends on the AX_APPEND_FLAG and AX_CHECK_LINK_FLAG. +# Please keep this macro in sync with AX_APPEND_COMPILE_FLAGS. +# +# LICENSE +# +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 7 + +AC_DEFUN([AX_APPEND_LINK_FLAGS], +[AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +AX_REQUIRE_DEFINED([AX_APPEND_FLAG]) +for flag in $1; do + AX_CHECK_LINK_FLAG([$flag], [AX_APPEND_FLAG([$flag], [m4_default([$2], [LDFLAGS])])], [], [$3], [$4]) +done +])dnl AX_APPEND_LINK_FLAGS diff --git a/vendor/riscv-isa-sim/ax_boost_asio.m4 b/vendor/riscv-isa-sim/ax_boost_asio.m4 new file mode 100644 index 00000000..4247b33c --- /dev/null +++ b/vendor/riscv-isa-sim/ax_boost_asio.m4 @@ -0,0 +1,110 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_asio.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_ASIO +# +# DESCRIPTION +# +# Test for Asio library from the Boost C++ libraries. The macro requires a +# preceding call to AX_BOOST_BASE. Further documentation is available at +# . +# +# This macro calls: +# +# AC_SUBST(BOOST_ASIO_LIB) +# +# And sets: +# +# HAVE_BOOST_ASIO +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2008 Pete Greenwell +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
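+#
+# EXAMPLE
+#
+#   A hypothetical configure.ac fragment; AX_BOOST_BASE must be called
+#   first so that BOOST_CPPFLAGS and BOOST_LDFLAGS are set up:
+#
+#     AX_BOOST_BASE([1.53])
+#     AX_BOOST_ASIO
+#     dnl ...then link the program against $(BOOST_ASIO_LIB)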
+ +#serial 18 + +AC_DEFUN([AX_BOOST_ASIO], +[ + AC_ARG_WITH([boost-asio], + AS_HELP_STRING([--with-boost-asio@<:@=special-lib@:>@], + [use the ASIO library from boost - it is possible to specify a certain library for the linker + e.g. --with-boost-asio=boost_system-gcc41-mt-1_34 ]), + [ + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ax_boost_user_asio_lib="" + else + want_boost="yes" + ax_boost_user_asio_lib="$withval" + fi + ], + [want_boost="yes"] + ) + + if test "x$want_boost" = "xyes"; then + AC_REQUIRE([AC_PROG_CC]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_CACHE_CHECK(whether the Boost::ASIO library is available, + ax_cv_boost_asio, + [AC_LANG_PUSH([C++]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ @%:@include + ]], + [[ + + boost::asio::io_service io; + boost::system::error_code timer_result; + boost::asio::deadline_timer t(io); + t.cancel(); + io.run_one(); + return 0; + ]])], + ax_cv_boost_asio=yes, ax_cv_boost_asio=no) + AC_LANG_POP([C++]) + ]) + if test "x$ax_cv_boost_asio" = "xyes"; then + AC_DEFINE(HAVE_BOOST_ASIO,,[define if the Boost::ASIO library is available]) + BN=boost_system + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` + if test "x$ax_boost_user_asio_lib" = "x"; then + for ax_lib in `ls $BOOSTLIBDIR/libboost_system*.so* $BOOSTLIBDIR/libboost_system*.dylib* $BOOSTLIBDIR/libboost_system*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_system.*\)\.so.*$;\1;' -e 's;^lib\(boost_system.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_system.*\)\.a.*$;\1;' ` ; do + AC_CHECK_LIB($ax_lib, main, [BOOST_ASIO_LIB="-l$ax_lib" AC_SUBST(BOOST_ASIO_LIB) link_thread="yes" break], + [link_thread="no"]) + done + else + for ax_lib in $ax_boost_user_asio_lib $BN-$ax_boost_user_asio_lib; do + AC_CHECK_LIB($ax_lib, main, + [BOOST_ASIO_LIB="-l$ax_lib" AC_SUBST(BOOST_ASIO_LIB) link_asio="yes" break], + [link_asio="no"]) + done + + fi + if test "x$ax_lib" = "x"; then + AC_MSG_ERROR(Could not find a version of the Boost::Asio library!) + fi + if test "x$link_asio" = "xno"; then + AC_MSG_ERROR(Could not link against $ax_lib !) + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi +]) diff --git a/vendor/riscv-isa-sim/ax_boost_base.m4 b/vendor/riscv-isa-sim/ax_boost_base.m4 new file mode 100644 index 00000000..519f1c9d --- /dev/null +++ b/vendor/riscv-isa-sim/ax_boost_base.m4 @@ -0,0 +1,303 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_base.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# DESCRIPTION +# +# Test for the Boost C++ libraries of a particular version (or newer) +# +# If no path to the installed boost library is given the macro searchs +# under /usr, /usr/local, /opt and /opt/local and evaluates the +# $BOOST_ROOT environment variable. Further documentation is available at +# . 
+# +# This macro calls: +# +# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS) +# +# And sets: +# +# HAVE_BOOST +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2009 Peter Adolphs +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 49 + +# example boost program (need to pass version) +m4_define([_AX_BOOST_BASE_PROGRAM], + [AC_LANG_PROGRAM([[ +#include +]],[[ +(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($1))])); +]])]) + +AC_DEFUN([AX_BOOST_BASE], +[ +AC_ARG_WITH([boost], + [AS_HELP_STRING([--with-boost@<:@=ARG@:>@], + [use Boost library from a standard location (ARG=yes), + from the specified location (ARG=), + or disable it (ARG=no) + @<:@ARG=yes@:>@ ])], + [ + AS_CASE([$withval], + [no],[want_boost="no";_AX_BOOST_BASE_boost_path=""], + [yes],[want_boost="yes";_AX_BOOST_BASE_boost_path=""], + [want_boost="yes";_AX_BOOST_BASE_boost_path="$withval"]) + ], + [want_boost="yes"]) + + +AC_ARG_WITH([boost-libdir], + [AS_HELP_STRING([--with-boost-libdir=LIB_DIR], + [Force given directory for boost libraries. + Note that this will override library path detection, + so use this parameter only if default library detection fails + and you know exactly where your boost libraries are located.])], + [ + AS_IF([test -d "$withval"], + [_AX_BOOST_BASE_boost_lib_path="$withval"], + [AC_MSG_ERROR([--with-boost-libdir expected directory name])]) + ], + [_AX_BOOST_BASE_boost_lib_path=""]) + +BOOST_LDFLAGS="" +BOOST_CPPFLAGS="" +AS_IF([test "x$want_boost" = "xyes"], + [_AX_BOOST_BASE_RUNDETECT([$1],[$2],[$3])]) +AC_SUBST(BOOST_CPPFLAGS) +AC_SUBST(BOOST_LDFLAGS) +]) + + +# convert a version string in $2 to numeric and affect to polymorphic var $1 +AC_DEFUN([_AX_BOOST_BASE_TONUMERICVERSION],[ + AS_IF([test "x$2" = "x"],[_AX_BOOST_BASE_TONUMERICVERSION_req="1.20.0"],[_AX_BOOST_BASE_TONUMERICVERSION_req="$2"]) + _AX_BOOST_BASE_TONUMERICVERSION_req_shorten=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\.[[0-9]]*\)'` + _AX_BOOST_BASE_TONUMERICVERSION_req_major=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\)'` + AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_major" = "x"], + [AC_MSG_ERROR([You should at least specify libboost major version])]) + _AX_BOOST_BASE_TONUMERICVERSION_req_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.\([[0-9]]*\)'` + AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_minor" = "x"], + [_AX_BOOST_BASE_TONUMERICVERSION_req_minor="0"]) + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` + AS_IF([test "X$_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor" = "X"], + [_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor="0"]) + _AX_BOOST_BASE_TONUMERICVERSION_RET=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req_major \* 100000 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_minor \* 100 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor` + AS_VAR_SET($1,$_AX_BOOST_BASE_TONUMERICVERSION_RET) +]) + +dnl Run the detection of boost should be run only if $want_boost +AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[ + _AX_BOOST_BASE_TONUMERICVERSION(WANT_BOOST_VERSION,[$1]) + succeeded=no + + + AC_REQUIRE([AC_CANONICAL_HOST]) + dnl On 64-bit systems check for system libraries in both lib64 and lib. + dnl The former is specified by FHS, but e.g. 
Debian does not adhere to + dnl this (as it rises problems for generic multi-arch support). + dnl The last entry in the list is chosen by default when no libraries + dnl are found, e.g. when only header-only libraries are installed! + AS_CASE([${host_cpu}], + [x86_64],[libsubdirs="lib64 libx32 lib lib64"], + [mips*64*],[libsubdirs="lib64 lib32 lib lib64"], + [ppc64|powerpc64|s390x|sparc64|aarch64|ppc64le|powerpc64le|riscv64|e2k],[libsubdirs="lib64 lib lib64"], + [libsubdirs="lib"] + ) + + dnl allow for real multi-arch paths e.g. /usr/lib/x86_64-linux-gnu. Give + dnl them priority over the other paths since, if libs are found there, they + dnl are almost assuredly the ones desired. + AS_CASE([${host_cpu}], + [i?86],[multiarch_libsubdir="lib/i386-${host_os}"], + [armv7l],[multiarch_libsubdir="lib/arm-${host_os}"], + [multiarch_libsubdir="lib/${host_cpu}-${host_os}"] + ) + + dnl first we check the system location for boost libraries + dnl this location ist chosen if boost libraries are installed with the --layout=system option + dnl or if you install boost with RPM + AS_IF([test "x$_AX_BOOST_BASE_boost_path" != "x"],[ + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) includes in "$_AX_BOOST_BASE_boost_path/include"]) + AS_IF([test -d "$_AX_BOOST_BASE_boost_path/include" && test -r "$_AX_BOOST_BASE_boost_path/include"],[ + AC_MSG_RESULT([yes]) + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include" + for _AX_BOOST_BASE_boost_path_tmp in $multiarch_libsubdir $libsubdirs; do + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) lib path in "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"]) + AS_IF([test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ],[ + AC_MSG_RESULT([yes]) + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"; + break; + ], + [AC_MSG_RESULT([no])]) + done],[ + AC_MSG_RESULT([no])]) + ],[ + if test X"$cross_compiling" = Xyes; then + search_libsubdirs=$multiarch_libsubdir + else + search_libsubdirs="$multiarch_libsubdir $libsubdirs" + fi + for _AX_BOOST_BASE_boost_path_tmp in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path_tmp/include/boost" && test -r "$_AX_BOOST_BASE_boost_path_tmp/include/boost" ; then + for libsubdir in $search_libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include" + break; + fi + done + ]) + + dnl overwrite ld flags if we have required special directory with + dnl --with-boost-libdir parameter + AS_IF([test "x$_AX_BOOST_BASE_boost_lib_path" != "x"], + [BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path"]) + + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION)]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_REQUIRE([AC_PROG_CXX]) + AC_LANG_PUSH(C++) + AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[ + AC_MSG_RESULT(yes) + succeeded=yes + found_system=yes + ],[ + ]) + AC_LANG_POP([C++]) + + + + dnl if we found no boost with system layout we search for boost libraries + dnl built and installed without the --layout=system option or for a staged(not installed) version + if test "x$succeeded" != "xyes" ; then + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + 
BOOST_CPPFLAGS= + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + BOOST_LDFLAGS= + fi + _version=0 + if test -n "$_AX_BOOST_BASE_boost_path" ; then + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "x$V_CHECK" = "x1" ; then + _version=$_version_tmp + fi + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include/boost-$VERSION_UNDERSCORE" + done + dnl if nothing found search for layout used in Windows distributions + if test -z "$BOOST_CPPFLAGS"; then + if test -d "$_AX_BOOST_BASE_boost_path/boost" && test -r "$_AX_BOOST_BASE_boost_path/boost"; then + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path" + fi + fi + dnl if we found something and BOOST_LDFLAGS was unset before + dnl (because "$_AX_BOOST_BASE_boost_lib_path" = ""), set it here. + if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then + for libsubdir in $libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir" + fi + fi + else + if test "x$cross_compiling" != "xyes" ; then + for _AX_BOOST_BASE_boost_path in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path" ; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "x$V_CHECK" = "x1" ; then + _version=$_version_tmp + best_path=$_AX_BOOST_BASE_boost_path + fi + done + fi + done + + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE" + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + for libsubdir in $libsubdirs ; do + if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$best_path/$libsubdir" + fi + fi + + if test -n "$BOOST_ROOT" ; then + for libsubdir in $libsubdirs ; do + if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then + version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'` + stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'` + stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'` + V_CHECK=`expr $stage_version_shorten \>\= $_version` + if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT) + BOOST_CPPFLAGS="-I$BOOST_ROOT" + BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir" + fi + fi + fi + fi + + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_LANG_PUSH(C++) + AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[ + AC_MSG_RESULT(yes) + succeeded=yes + found_system=yes + ],[ + ]) + AC_LANG_POP([C++]) + fi + + if test "x$succeeded" != "xyes" ; then + if test "x$_version" = "x0" ; then + AC_MSG_NOTICE([[We could not detect the boost libraries (version $1 or higher). 
If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to the --with-boost option. If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation.]])
+    else
+      AC_MSG_NOTICE([Your boost libraries seem to be too old (version $_version).])
+    fi
+    # execute ACTION-IF-NOT-FOUND (if present):
+    ifelse([$3], , :, [$3])
+  else
+    AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available])
+    # execute ACTION-IF-FOUND (if present):
+    ifelse([$2], , :, [$2])
+  fi
+
+  CPPFLAGS="$CPPFLAGS_SAVED"
+  LDFLAGS="$LDFLAGS_SAVED"
+
+])
diff --git a/vendor/riscv-isa-sim/ax_boost_regex.m4 b/vendor/riscv-isa-sim/ax_boost_regex.m4
new file mode 100644
index 00000000..e2413c24
--- /dev/null
+++ b/vendor/riscv-isa-sim/ax_boost_regex.m4
@@ -0,0 +1,111 @@
+# ===========================================================================
+#  https://www.gnu.org/software/autoconf-archive/ax_boost_regex.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+#   AX_BOOST_REGEX
+#
+# DESCRIPTION
+#
+#   Test for Regex library from the Boost C++ libraries. The macro requires
+#   a preceding call to AX_BOOST_BASE. Further documentation is available at
+#   .
+#
+#   This macro calls:
+#
+#     AC_SUBST(BOOST_REGEX_LIB)
+#
+#   And sets:
+#
+#     HAVE_BOOST_REGEX
+#
+# LICENSE
+#
+#   Copyright (c) 2008 Thomas Porschberg
+#   Copyright (c) 2008 Michael Tindal
+#
+#   Copying and distribution of this file, with or without modification, are
+#   permitted in any medium without royalty provided the copyright notice
+#   and this notice are preserved. This file is offered as-is, without any
+#   warranty.
+
+#serial 23
+
+AC_DEFUN([AX_BOOST_REGEX],
+[
+	AC_ARG_WITH([boost-regex],
+	AS_HELP_STRING([--with-boost-regex@<:@=special-lib@:>@],
+                   [use the Regex library from boost - it is possible to specify a certain library for the linker
+                    e.g.
--with-boost-regex=boost_regex-gcc-mt-d-1_33_1 ]), + [ + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ax_boost_user_regex_lib="" + else + want_boost="yes" + ax_boost_user_regex_lib="$withval" + fi + ], + [want_boost="yes"] + ) + + if test "x$want_boost" = "xyes"; then + AC_REQUIRE([AC_PROG_CC]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_CACHE_CHECK(whether the Boost::Regex library is available, + ax_cv_boost_regex, + [AC_LANG_PUSH([C++]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include + ]], + [[boost::regex r(); return 0;]])], + ax_cv_boost_regex=yes, ax_cv_boost_regex=no) + AC_LANG_POP([C++]) + ]) + if test "x$ax_cv_boost_regex" = "xyes"; then + AC_DEFINE(HAVE_BOOST_REGEX,,[define if the Boost::Regex library is available]) + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` + if test "x$ax_boost_user_regex_lib" = "x"; then + for libextension in `ls $BOOSTLIBDIR/libboost_regex*.so* $BOOSTLIBDIR/libboost_regex*.dylib* $BOOSTLIBDIR/libboost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_regex.*\)\.so.*$;\1;' -e 's;^lib\(boost_regex.*\)\.dylib.*;\1;' -e 's;^lib\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_REGEX_LIB="-l$ax_lib"; AC_SUBST(BOOST_REGEX_LIB) link_regex="yes"; break], + [link_regex="no"]) + done + if test "x$link_regex" != "xyes"; then + for libextension in `ls $BOOSTLIBDIR/boost_regex*.dll* $BOOSTLIBDIR/boost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_regex.*\)\.dll.*$;\1;' -e 's;^\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_REGEX_LIB="-l$ax_lib"; AC_SUBST(BOOST_REGEX_LIB) link_regex="yes"; break], + [link_regex="no"]) + done + fi + + else + for ax_lib in $ax_boost_user_regex_lib boost_regex-$ax_boost_user_regex_lib; do + AC_CHECK_LIB($ax_lib, main, + [BOOST_REGEX_LIB="-l$ax_lib"; AC_SUBST(BOOST_REGEX_LIB) link_regex="yes"; break], + [link_regex="no"]) + done + fi + if test "x$ax_lib" = "x"; then + AC_MSG_ERROR(Could not find a version of the Boost::Regex library!) + fi + if test "x$link_regex" != "xyes"; then + AC_MSG_ERROR(Could not link against $ax_lib !) + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi +]) diff --git a/vendor/riscv-isa-sim/ax_check_compile_flag.m4 b/vendor/riscv-isa-sim/ax_check_compile_flag.m4 new file mode 100644 index 00000000..bd753b34 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_check_compile_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. 
+# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/vendor/riscv-isa-sim/ax_check_link_flag.m4 b/vendor/riscv-isa-sim/ax_check_link_flag.m4 new file mode 100644 index 00000000..03a30ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_check_link_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_link_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_LINK_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the linker or gives an error. +# (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_LINK_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,COMPILE}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
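+#
+# EXAMPLE
+#
+#   A hypothetical use, appending --gc-sections to LDFLAGS only when the
+#   linker actually accepts the flag:
+#
+#     AX_CHECK_LINK_FLAG([-Wl,--gc-sections],
+#       [AX_APPEND_FLAG([-Wl,--gc-sections], [LDFLAGS])])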
+ +#serial 6 + +AC_DEFUN([AX_CHECK_LINK_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_ldflags_$4_$1])dnl +AC_CACHE_CHECK([whether the linker accepts $1], CACHEVAR, [ + ax_check_save_flags=$LDFLAGS + LDFLAGS="$LDFLAGS $4 $1" + AC_LINK_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + LDFLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_LINK_FLAGS diff --git a/vendor/riscv-isa-sim/ax_require_defined.m4 b/vendor/riscv-isa-sim/ax_require_defined.m4 new file mode 100644 index 00000000..17c3eab7 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_require_defined.m4 @@ -0,0 +1,37 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_REQUIRE_DEFINED], [dnl + m4_ifndef([$1], [m4_fatal([macro ]$1[ is not defined; is a m4 file missing?])]) +])dnl AX_REQUIRE_DEFINED diff --git a/vendor/riscv-isa-sim/ci-tests/test-spike b/vendor/riscv-isa-sim/ci-tests/test-spike new file mode 100755 index 00000000..3d5ed6d7 --- /dev/null +++ b/vendor/riscv-isa-sim/ci-tests/test-spike @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +mkdir build +cd build +mkdir install +$DIR/../configure --prefix=`pwd`/install +make -j4 +make install diff --git a/vendor/riscv-isa-sim/config.h.in b/vendor/riscv-isa-sim/config.h.in new file mode 100644 index 00000000..46d8c00b --- /dev/null +++ b/vendor/riscv-isa-sim/config.h.in @@ -0,0 +1,142 @@ +/* config.h.in. Generated from configure.ac by autoheader. 
*/
+
+/* Define if building universal (internal helper macro) */
+#undef AC_APPLE_UNIVERSAL_BUILD
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef CUSTOMEXT_ENABLED
+
+/* Default value for --isa switch */
+#undef DEFAULT_ISA
+
+/* Default value for --priv switch */
+#undef DEFAULT_PRIV
+
+/* Default value for --varch switch */
+#undef DEFAULT_VARCH
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef DISASM_ENABLED
+
+/* Executable name of device-tree-compiler */
+#undef DTC
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef FDT_ENABLED
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef FESVR_ENABLED
+
+/* define if the Boost library is available */
+#undef HAVE_BOOST
+
+/* define if the Boost::ASIO library is available */
+#undef HAVE_BOOST_ASIO
+
+/* Dynamic library loading is supported */
+#undef HAVE_DLOPEN
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the `boost_regex' library (-lboost_regex). */
+#undef HAVE_LIBBOOST_REGEX
+
+/* Define to 1 if you have the `boost_system' library (-lboost_system). */
+#undef HAVE_LIBBOOST_SYSTEM
+
+/* Define to 1 if you have the `pthread' library (-lpthread). */
+#undef HAVE_LIBPTHREAD
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if struct statx exists. */
+#undef HAVE_STATX
+
+/* Define to 1 if struct statx has stx_mnt_id. */
+#undef HAVE_STATX_MNT_ID
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef RISCV_ENABLED
+
+/* Enable commit log generation */
+#undef RISCV_ENABLE_COMMITLOG
+
+/* Enable hardware management of PTE accessed and dirty bits */
+#undef RISCV_ENABLE_DIRTY
+
+/* Enable support for running target in either endianness */
+#undef RISCV_ENABLE_DUAL_ENDIAN
+
+/* Enable PC histogram generation */
+#undef RISCV_ENABLE_HISTOGRAM
+
+/* Enable hardware support for misaligned loads and stores */
+#undef RISCV_ENABLE_MISALIGNED
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef SOFTFLOAT_ENABLED
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef SPIKE_DASM_ENABLED
+
+/* Define if subproject MCPPBS_SPROJ_NORM is enabled */
+#undef SPIKE_MAIN_ENABLED
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Default value for --with-target switch */
+#undef TARGET_ARCH
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+   significant byte first (like Motorola and SPARC, unlike Intel).
*/ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +# undef WORDS_BIGENDIAN +# endif +#endif diff --git a/vendor/riscv-isa-sim/configure b/vendor/riscv-isa-sim/configure new file mode 100755 index 00000000..0af582f3 --- /dev/null +++ b/vendor/riscv-isa-sim/configure @@ -0,0 +1,7714 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for RISC-V ISA Simulator ?. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. 
+fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and Andrew Waterman +$0: about your system, including any error possibly output +$0: before this message. Then install a modern shell, or +$0: manually run the script under such a shell if you do +$0: have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. 
+as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. 
+PACKAGE_NAME='RISC-V ISA Simulator'
+PACKAGE_TARNAME='spike'
+PACKAGE_VERSION='?'
+PACKAGE_STRING='RISC-V ISA Simulator ?'
+PACKAGE_BUGREPORT='Andrew Waterman'
+PACKAGE_URL=''
+
+ac_unique_file="riscv/common.h"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+#  include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+#  include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='LTLIBOBJS
+LIBOBJS
+subprojects_enabled
+subprojects
+HAVE_DLOPEN
+BOOST_REGEX_LIB
+BOOST_ASIO_LIB
+BOOST_LDFLAGS
+BOOST_CPPFLAGS
+HAVE_CLANG_PCH
+HAVE_INT128
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+STOW_PREFIX
+STOW_ROOT
+enable_stow
+EGREP
+GREP
+CXXCPP
+DTC
+RANLIB
+AR
+ac_ct_CXX
+CXXFLAGS
+CXX
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_stow
+enable_optional_subprojects
+with_boost
+with_boost_libdir
+with_boost_asio
+with_boost_regex
+with_isa
+with_priv
+with_varch
+with_target
+enable_commitlog
+enable_histogram
+enable_dirty
+enable_misaligned
+enable_dual_endian
+'
+      ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CXX
+CXXFLAGS
+CCC
+CXXCPP
+STOW_ROOT
+STOW_PREFIX'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
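+#
+# Illustrative sketch (not part of the generated script): because the
+# defaults below stay unexpanded, the install layout can be re-pointed at
+# install time; the paths /opt/spike and /tmp/stage are hypothetical:
+#
+#   ./configure --prefix=/opt/spike
+#   make install exec_prefix=/tmp/stage   # bindir resolves to /tmp/stage/bin
+#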
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + 
ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. 
+case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures RISC-V ISA Simulator ? to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/spike]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+
+System types:
+  --build=BUILD     configure for building on BUILD [guessed]
+  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+  case $ac_init_help in
+     short | recursive ) echo "Configuration of RISC-V ISA Simulator ?:";;
+   esac
+  cat <<\_ACEOF
+
+Optional Features:
+  --disable-option-checking  ignore unrecognized --enable/--with options
+  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
+  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
+  --enable-stow           Enable stow-based install
+  --enable-optional-subprojects
+                          Enable all optional subprojects
+  --enable-commitlog      Enable commit log generation
+  --enable-histogram      Enable PC histogram generation
+  --enable-dirty          Enable hardware management of PTE accessed and dirty
+                          bits
+  --enable-misaligned     Enable hardware support for misaligned loads and
+                          stores
+  --enable-dual-endian    Enable support for running target in either
+                          endianness
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-boost[=ARG]      use Boost library from a standard location
+                          (ARG=yes), from the specified location (ARG=<path>),
+                          or disable it (ARG=no) [ARG=yes]
+  --with-boost-libdir=LIB_DIR
+                          Force given directory for boost libraries. Note that
+                          this will override library path detection, so use
+                          this parameter only if default library detection
+                          fails and you know exactly where your boost
+                          libraries are located.
+  --with-boost-asio[=special-lib]
+                          use the ASIO library from boost - it is possible to
+                          specify a certain library for the linker e.g.
+                          --with-boost-asio=boost_system-gcc41-mt-1_34
+  --with-boost-regex[=special-lib]
+                          use the Regex library from boost - it is possible to
+                          specify a certain library for the linker e.g.
+                          --with-boost-regex=boost_regex-gcc-mt-d-1_33_1
+  --with-isa=RV64IMAFDC   Sets the default RISC-V ISA
+  --with-priv=MSU         Sets the default RISC-V privilege modes supported
+  --with-varch=vlen:128,elen:64
+                          Sets the default vector config
+  --with-target=riscv64-unknown-elf
+                          Sets the default target config
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CXX         C++ compiler command
+  CXXFLAGS    C++ compiler flags
+  CXXCPP      C++ preprocessor
+  STOW_ROOT   Root for non-native stow-based installs
+  STOW_PREFIX Prefix for stow-based installs
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <Andrew Waterman>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
+    ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for guested configure.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
+    else
+      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
+  done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+  cat <<\_ACEOF
+RISC-V ISA Simulator configure ?
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+  exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+ if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_cxx_try_run LINENO +# ------------------------ +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_cxx_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_run + +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_cpp + +# ac_fn_cxx_check_header_compile LINENO HEADER VAR INCLUDES +# --------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_cxx_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_header_compile + +# ac_fn_cxx_check_type LINENO TYPE VAR INCLUDES +# --------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_cxx_check_type () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof ($2)) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_type + +# ac_fn_cxx_try_link LINENO +# ------------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_cxx_check_member LINENO AGGR MEMBER VAR INCLUDES +# ------------------------------------------------------ +# Tries to find if the field MEMBER exists in type AGGR, after including +# INCLUDES, setting cache variable VAR accordingly. +ac_fn_cxx_check_member () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 +$as_echo_n "checking for $2.$3... " >&6; } +if eval \${$4+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$5 +int +main () +{ +static $2 ac_aggr; +if (ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$4=yes" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$5 +int +main () +{ +static $2 ac_aggr; +if (sizeof ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$4=yes" +else + eval "$4=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$4 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_member +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by RISC-V ISA Simulator $as_me ?, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +ac_aux_dir= +for ac_dir in scripts "$srcdir"/scripts; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in scripts \"$srcdir\"/scripts" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 8 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_link_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_LINK_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the linker or gives an error. +# (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_LINK_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,COMPILE}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_link_flags.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_LINK_FLAGS([FLAG1 FLAG2 ...], [FLAGS-VARIABLE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# For every FLAG1, FLAG2 it is checked whether the linker works with the +# flag. If it does, the flag is added FLAGS-VARIABLE +# +# If FLAGS-VARIABLE is not specified, the linker's flags (LDFLAGS) is +# used. During the check the flag is always added to the linker's flags. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. 
The check is thus made with the flags: "LDFLAGS
+# EXTRA-FLAGS FLAG". This can for example be used to force the linker to
+# issue an error when a bad flag is given.
+#
+# INPUT gives an alternative input source to AC_COMPILE_IFELSE.
+#
+# NOTE: This macro depends on the AX_APPEND_FLAG and AX_CHECK_LINK_FLAG.
+# Please keep this macro in sync with AX_APPEND_COMPILE_FLAGS.
+#
+# LICENSE
+#
+# Copyright (c) 2011 Maarten Bosmans
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 7
+
+
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_boost_base.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+#
+# DESCRIPTION
+#
+# Test for the Boost C++ libraries of a particular version (or newer)
+#
+# If no path to the installed boost library is given the macro searches
+# under /usr, /usr/local, /opt and /opt/local and evaluates the
+# $BOOST_ROOT environment variable. Further documentation is available at
+# <http://randspringer.de/boost/index.html>.
+#
+# This macro calls:
+#
+# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS)
+#
+# And sets:
+#
+# HAVE_BOOST
+#
+# LICENSE
+#
+# Copyright (c) 2008 Thomas Porschberg
+# Copyright (c) 2009 Peter Adolphs
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 49
+
+# example boost program (need to pass version)
+
+
+
+
+
+# convert a version string in $2 to numeric and assign it to the polymorphic var $1
+
+
+
+
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_boost_asio.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_ASIO
+#
+# DESCRIPTION
+#
+# Test for Asio library from the Boost C++ libraries. The macro requires a
+# preceding call to AX_BOOST_BASE. Further documentation is available at
+# <http://randspringer.de/boost/index.html>.
+#
+# This macro calls:
+#
+# AC_SUBST(BOOST_ASIO_LIB)
+#
+# And sets:
+#
+# HAVE_BOOST_ASIO
+#
+# LICENSE
+#
+# Copyright (c) 2008 Thomas Porschberg
+# Copyright (c) 2008 Pete Greenwell
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 18
+
+
+
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_boost_regex.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_REGEX
+#
+# DESCRIPTION
+#
+# Test for Regex library from the Boost C++ libraries. The macro requires
+# a preceding call to AX_BOOST_BASE. Further documentation is available at
+# <http://randspringer.de/boost/index.html>.
+# +# This macro calls: +# +# AC_SUBST(BOOST_REGEX_LIB) +# +# And sets: +# +# HAVE_BOOST_REGEX +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2008 Michael Tindal +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 23 + + + + +#------------------------------------------------------------------------- +# Checks for programs +#------------------------------------------------------------------------- + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. 
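+# Roughly, the probe below amounts to this shell sketch (illustrative only;
+# the file name probe.c is hypothetical):
+#
+#   printf 'int main () { return 0; }\n' > probe.c
+#   $CC $CFLAGS probe.c        # no -o: the compiler chooses the output name
+#   for f in a.out a.exe probe.exe b.out; do
+#     test -f "$f" && echo "default executable name: $f"
+#   done
+#
+# Whichever file appears reveals the platform's executable suffix (exeext).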
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... " >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ?
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_AR" = x; then + AR="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +else + AR="$ac_cv_prog_AR" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+# Extract the first word of "dtc", so it can be a program name with args.
+set dummy dtc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_DTC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $DTC in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_DTC="$DTC" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_DTC="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_DTC" && ac_cv_path_DTC="no"
+ ;;
+esac
+fi
+DTC=$ac_cv_path_DTC
+if test -n "$DTC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DTC" >&5
+$as_echo "$DTC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+if test x"$DTC" == xno; then :
+ as_fn_error $? "device-tree-compiler not found" "$LINENO" 5
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define DTC "dtc"
+_ACEOF
+
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+ if ${ac_cv_prog_CXXCPP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CXXCPP needs to be expanded
+ for CXXCPP in "$CXX -E" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+ CXXCPP=$ac_cv_prog_CXXCPP
+else
+ ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+$as_echo "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU ac_path_GREP and select it if it is found.
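+# A GNU candidate is taken immediately; any other candidate is scored by
+# the doubling loop below, which keeps growing a test line until the grep
+# under test fails to match it. In spirit (illustrative sketch only):
+#
+#   $as_echo_n 0123456789 > conftest.in         # seed line
+#   cat conftest.in conftest.in > conftest.tmp  # double it each round
+#
+# The candidate that survives the most doublings (up to a fixed cap)
+# becomes $ac_cv_path_GREP.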
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... 
" >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_cxx_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +$as_echo_n "checking whether byte ordering is bigendian... " >&6; } +if ${ac_cv_c_bigendian+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_bigendian=unknown + # See if we're dealing with a universal compiler. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + # Check for potential -arch flags. It is not universal unless + # there are at least two -arch flags with different values. 
+ ac_arch=
+ ac_prev=
+ for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
+ if test -n "$ac_prev"; then
+ case $ac_word in
+ i?86 | x86_64 | ppc | ppc64)
+ if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
+ ac_arch=$ac_word
+ else
+ ac_cv_c_bigendian=universal
+ break
+ fi
+ ;;
+ esac
+ ac_prev=
+ elif test "x$ac_word" = "x-arch"; then
+ ac_prev=arch
+ fi
+ done
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if sys/param.h defines the BYTE_ORDER macro.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+ && LITTLE_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to _BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # Compile a test program.
+ if test "$cross_compiling" = yes; then :
+ # Try to guess by grepping values from an object file.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+short int ascii_mm[] =
+ { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+ short int ascii_ii[] =
+ { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+ int use_ascii (int i) {
+ return ascii_mm[i] + ascii_ii[i];
+ }
+ short int ebcdic_ii[] =
+ { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+ short int ebcdic_mm[] =
+ { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+ int use_ebcdic (int i) {
+ return ebcdic_mm[i] + ebcdic_ii[i];
+ }
+ extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+ ac_cv_c_bigendian=yes
+ fi
+ if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+ if test "$ac_cv_c_bigendian" = unknown; then
+ ac_cv_c_bigendian=no
+ else
+ # finding both strings is unlikely to happen, but who knows?
+ ac_cv_c_bigendian=unknown
+ fi
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ +$ac_includes_default +int +main () +{ + + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + ac_cv_c_bigendian=no +else + ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +$as_echo "$ac_cv_c_bigendian" >&6; } + case $ac_cv_c_bigendian in #( + yes) + $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h +;; #( + no) + ;; #( + universal) + +$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h + + ;; #( + *) + as_fn_error $? "unknown endianness + presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; + esac + + +#------------------------------------------------------------------------- +# MCPPBS specific program checks +#------------------------------------------------------------------------- +# These macros check to see if we can do a stow-based install and also +# check for an isa simulator suitable for running the unit test programs +# via the makefile. + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+
+
+ # Configure command line option
+
+ # Check whether --enable-stow was given.
+if test "${enable_stow+set}" = set; then :
+ enableval=$enable_stow; enable_stow="yes"
+else
+ enable_stow="no"
+fi
+
+
+
+ # Environment variables
+
+
+
+
+ # Check for install script
+
+
+
+
+#-------------------------------------------------------------------------
+# Checks for header files
+#-------------------------------------------------------------------------
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ?
((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + + +#------------------------------------------------------------------------- +# Checks for type +#------------------------------------------------------------------------- + +ac_fn_cxx_check_type "$LINENO" "__int128_t" "ac_cv_type___int128_t" "$ac_includes_default" +if test "x$ac_cv_type___int128_t" = xyes; then : + HAVE_INT128=yes + +fi + + +#------------------------------------------------------------------------- +# Default compiler flags +#------------------------------------------------------------------------- + + + + + +for flag in -Wl,--export-dynamic; do + as_CACHEVAR=`$as_echo "ax_cv_check_ldflags__$flag" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the linker accepts $flag" >&5 +$as_echo_n "checking whether the linker accepts $flag... " >&6; } +if eval \${$as_CACHEVAR+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$LDFLAGS + LDFLAGS="$LDFLAGS $flag" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_CACHEVAR=yes" +else + eval "$as_CACHEVAR=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$ax_check_save_flags +fi +eval ac_res=\$$as_CACHEVAR + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_CACHEVAR"\" = x"yes"; then : + +if ${LDFLAGS+:} false; then : + + case " $LDFLAGS " in #( + *" $flag "*) : + { { $as_echo "$as_me:${as_lineno-$LINENO}: : LDFLAGS already contains \$flag"; } >&5 + (: LDFLAGS already contains $flag) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } ;; #( + *) : + + as_fn_append LDFLAGS " $flag" + { { $as_echo "$as_me:${as_lineno-$LINENO}: : LDFLAGS=\"\$LDFLAGS\""; } >&5 + (: LDFLAGS="$LDFLAGS") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + ;; +esac + +else + + LDFLAGS=$flag + { { $as_echo "$as_me:${as_lineno-$LINENO}: : LDFLAGS=\"\$LDFLAGS\""; } >&5 + (: LDFLAGS="$LDFLAGS") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + +fi + +else + : +fi + +done + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C++ compiler accepts -relocatable-pch" >&5 +$as_echo_n "checking whether C++ compiler accepts -relocatable-pch... " >&6; } +if ${ax_cv_check_cxxflags___relocatable_pch+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CXXFLAGS + CXXFLAGS="$CXXFLAGS -relocatable-pch" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_check_cxxflags___relocatable_pch=yes +else + ax_cv_check_cxxflags___relocatable_pch=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CXXFLAGS=$ax_check_save_flags +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cxxflags___relocatable_pch" >&5 +$as_echo "$ax_cv_check_cxxflags___relocatable_pch" >&6; } +if test "x$ax_cv_check_cxxflags___relocatable_pch" = xyes; then : + HAVE_CLANG_PCH=yes + +else + : +fi + + +#------------------------------------------------------------------------- +# MCPPBS subproject list +#------------------------------------------------------------------------- +# Order list so that subprojects only depend on those listed earlier. +# The '*' suffix indicates an optional subproject. The '**' suffix +# indicates an optional subproject which is also the name of a group. + + + + # Add command line argument to enable all optional subprojects + + # Check whether --enable-optional-subprojects was given. +if test "${enable_optional_subprojects+set}" = set; then : + enableval=$enable_optional_subprojects; +fi + + + # Loop through the subprojects given in the macro argument + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects fesvr" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : fesvr" >&5 +$as_echo "$as_me: configuring default subproject : fesvr" >&6;} + ac_config_files="$ac_config_files fesvr.mk:fesvr/fesvr.mk.in" + + enable_fesvr_sproj="yes" + subprojects_enabled="$subprojects_enabled fesvr" + +$as_echo "#define FESVR_ENABLED /**/" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 +$as_echo_n "checking for pthread_create in -lpthread... " >&6; } +if ${ac_cv_lib_pthread_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_pthread_pthread_create=yes +else + ac_cv_lib_pthread_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } +if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPTHREAD 1 +_ACEOF + + LIBS="-lpthread $LIBS" + +else + as_fn_error $? 
"libpthread is required" "$LINENO" 5 +fi + + +ac_fn_cxx_check_member "$LINENO" "struct statx" "stx_ino" "ac_cv_member_struct_statx_stx_ino" "$ac_includes_default" +if test "x$ac_cv_member_struct_statx_stx_ino" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STATX 1 +_ACEOF + +fi + + +ac_fn_cxx_check_member "$LINENO" "struct statx" "stx_mnt_id" "ac_cv_member_struct_statx_stx_mnt_id" "$ac_includes_default" +if test "x$ac_cv_member_struct_statx_stx_mnt_id" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STATX_MNT_ID 1 +_ACEOF + +fi + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects riscv" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : riscv" >&5 +$as_echo "$as_me: configuring default subproject : riscv" >&6;} + ac_config_files="$ac_config_files riscv.mk:riscv/riscv.mk.in" + + enable_riscv_sproj="yes" + subprojects_enabled="$subprojects_enabled riscv" + +$as_echo "#define RISCV_ENABLED /**/" >>confdefs.h + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + +# Check whether --with-boost was given. +if test "${with_boost+set}" = set; then : + withval=$with_boost; + case $withval in #( + no) : + want_boost="no";_AX_BOOST_BASE_boost_path="" ;; #( + yes) : + want_boost="yes";_AX_BOOST_BASE_boost_path="" ;; #( + *) : + want_boost="yes";_AX_BOOST_BASE_boost_path="$withval" ;; +esac + +else + want_boost="yes" +fi + + + + +# Check whether --with-boost-libdir was given. +if test "${with_boost_libdir+set}" = set; then : + withval=$with_boost_libdir; + if test -d "$withval"; then : + _AX_BOOST_BASE_boost_lib_path="$withval" +else + as_fn_error $? "--with-boost-libdir expected directory name" "$LINENO" 5 +fi + +else + _AX_BOOST_BASE_boost_lib_path="" +fi + + +BOOST_LDFLAGS="" +BOOST_CPPFLAGS="" +if test "x$want_boost" = "xyes"; then : + + + if test "x1.53" = "x"; then : + _AX_BOOST_BASE_TONUMERICVERSION_req="1.20.0" +else + _AX_BOOST_BASE_TONUMERICVERSION_req="1.53" +fi + _AX_BOOST_BASE_TONUMERICVERSION_req_shorten=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([0-9]*\.[0-9]*\)'` + _AX_BOOST_BASE_TONUMERICVERSION_req_major=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([0-9]*\)'` + if test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_major" = "x"; then : + as_fn_error $? 
"You should at least specify libboost major version" "$LINENO" 5 +fi + _AX_BOOST_BASE_TONUMERICVERSION_req_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[0-9]*\.\([0-9]*\)'` + if test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_minor" = "x"; then : + _AX_BOOST_BASE_TONUMERICVERSION_req_minor="0" +fi + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[0-9]*\.[0-9]*\.\([0-9]*\)'` + if test "X$_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor" = "X"; then : + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor="0" +fi + _AX_BOOST_BASE_TONUMERICVERSION_RET=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req_major \* 100000 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_minor \* 100 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor` + WANT_BOOST_VERSION=$_AX_BOOST_BASE_TONUMERICVERSION_RET + + succeeded=no + + + + case ${host_cpu} in #( + x86_64) : + libsubdirs="lib64 libx32 lib lib64" ;; #( + mips*64*) : + libsubdirs="lib64 lib32 lib lib64" ;; #( + ppc64|powerpc64|s390x|sparc64|aarch64|ppc64le|powerpc64le|riscv64|e2k) : + libsubdirs="lib64 lib lib64" ;; #( + *) : + libsubdirs="lib" + ;; +esac + + case ${host_cpu} in #( + i?86) : + multiarch_libsubdir="lib/i386-${host_os}" ;; #( + armv7l) : + multiarch_libsubdir="lib/arm-${host_os}" ;; #( + *) : + multiarch_libsubdir="lib/${host_cpu}-${host_os}" + ;; +esac + + if test "x$_AX_BOOST_BASE_boost_path" != "x"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) includes in \"$_AX_BOOST_BASE_boost_path/include\"" >&5 +$as_echo_n "checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) includes in \"$_AX_BOOST_BASE_boost_path/include\"... " >&6; } + if test -d "$_AX_BOOST_BASE_boost_path/include" && test -r "$_AX_BOOST_BASE_boost_path/include"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include" + for _AX_BOOST_BASE_boost_path_tmp in $multiarch_libsubdir $libsubdirs; do + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) lib path in \"$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp\"" >&5 +$as_echo_n "checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) lib path in \"$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp\"... 
" >&6; } + if test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"; + break; + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + done +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + +else + + if test X"$cross_compiling" = Xyes; then + search_libsubdirs=$multiarch_libsubdir + else + search_libsubdirs="$multiarch_libsubdir $libsubdirs" + fi + for _AX_BOOST_BASE_boost_path_tmp in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path_tmp/include/boost" && test -r "$_AX_BOOST_BASE_boost_path_tmp/include/boost" ; then + for libsubdir in $search_libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include" + break; + fi + done + +fi + + if test "x$_AX_BOOST_BASE_boost_lib_path" != "x"; then : + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path" +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= 1.53 ($WANT_BOOST_VERSION)" >&5 +$as_echo_n "checking for boostlib >= 1.53 ($WANT_BOOST_VERSION)... " >&6; } + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+
+#include <boost/version.hpp>
+
+int
+main ()
+{
+
+(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($WANT_BOOST_VERSION))]));
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ succeeded=yes
+ found_system=yes
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+
+
+ if test "x$succeeded" != "xyes" ; then
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ BOOST_CPPFLAGS=
+ if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
+ BOOST_LDFLAGS=
+ fi
+ _version=0
+ if test -n "$_AX_BOOST_BASE_boost_path" ; then
+ if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then
+ for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do
+ _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
+ V_CHECK=`expr $_version_tmp \> $_version`
+ if test "x$V_CHECK" = "x1" ; then
+ _version=$_version_tmp
+ fi
+ VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
+ BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include/boost-$VERSION_UNDERSCORE"
+ done
+ if test -z "$BOOST_CPPFLAGS"; then
+ if test -d "$_AX_BOOST_BASE_boost_path/boost" && test -r "$_AX_BOOST_BASE_boost_path/boost"; then
+ BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path"
+ fi
+ fi
+ if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then
+ for libsubdir in $libsubdirs ; do
+ if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
+ done
+ BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir"
+ fi
+ fi
+ else
+ if test "x$cross_compiling" != "xyes" ; then
+ for _AX_BOOST_BASE_boost_path in /usr /usr/local /opt /opt/local ; do
+ if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path" ; then
+ for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do
+ _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
+ V_CHECK=`expr $_version_tmp \> $_version`
+ if test "x$V_CHECK" = "x1" ; then
+ _version=$_version_tmp
+ best_path=$_AX_BOOST_BASE_boost_path
+ fi
+ done
+ fi
+ done
+
+ VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
+ BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE"
+ if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
+ for libsubdir in $libsubdirs ; do
+ if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
+ done
+ BOOST_LDFLAGS="-L$best_path/$libsubdir"
+ fi
+ fi
+
+ if test -n "$BOOST_ROOT" ; then
+ for libsubdir in $libsubdirs ; do
+ if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
+ done
+ if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then
+ version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'`
+ stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'`
+ stage_version_shorten=`expr $stage_version : '\([0-9]*\.[0-9]*\)'`
+ V_CHECK=`expr $stage_version_shorten \>\= $_version`
+ if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: We will use a staged boost library from $BOOST_ROOT" >&5
+$as_echo "$as_me: We will use a staged
boost library from $BOOST_ROOT" >&6;}
+ BOOST_CPPFLAGS="-I$BOOST_ROOT"
+ BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir"
+ fi
+ fi
+ fi
+ fi
+
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <boost/version.hpp>
+
+int
+main ()
+{
+
+(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($WANT_BOOST_VERSION))]));
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ succeeded=yes
+ found_system=yes
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ fi
+
+ if test "x$succeeded" != "xyes" ; then
+ if test "x$_version" = "x0" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: We could not detect the boost libraries (version 1.53 or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation." >&5
+$as_echo "$as_me: We could not detect the boost libraries (version 1.53 or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation." >&6;}
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Your boost libraries seem to be too old (version $_version)." >&5
+$as_echo "$as_me: Your boost libraries seem to be too old (version $_version)." >&6;}
+ fi
+ # execute ACTION-IF-NOT-FOUND (if present):
+ :
+ else
+
+$as_echo "#define HAVE_BOOST /**/" >>confdefs.h
+
+ # execute ACTION-IF-FOUND (if present):
+ :
+ fi
+
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+
+
+fi
+
+
+
+
+
+# Check whether --with-boost-asio was given.
+if test "${with_boost_asio+set}" = set; then :
+ withval=$with_boost_asio;
+ if test "$withval" = "no"; then
+ want_boost="no"
+ elif test "$withval" = "yes"; then
+ want_boost="yes"
+ ax_boost_user_asio_lib=""
+ else
+ want_boost="yes"
+ ax_boost_user_asio_lib="$withval"
+ fi
+
+else
+ want_boost="yes"
+
+fi
+
+
+ if test "x$want_boost" = "xyes"; then
+
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the Boost::ASIO library is available" >&5
+$as_echo_n "checking whether the Boost::ASIO library is available... 
" >&6; } +if ${ax_cv_boost_asio+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + #include + +int +main () +{ + + + boost::asio::io_service io; + boost::system::error_code timer_result; + boost::asio::deadline_timer t(io); + t.cancel(); + io.run_one(); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_boost_asio=yes +else + ax_cv_boost_asio=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_boost_asio" >&5 +$as_echo "$ax_cv_boost_asio" >&6; } + if test "x$ax_cv_boost_asio" = "xyes"; then + +$as_echo "#define HAVE_BOOST_ASIO /**/" >>confdefs.h + + BN=boost_system + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/[^\/]*//'` + if test "x$ax_boost_user_asio_lib" = "x"; then + for ax_lib in `ls $BOOSTLIBDIR/libboost_system*.so* $BOOSTLIBDIR/libboost_system*.dylib* $BOOSTLIBDIR/libboost_system*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_system.*\)\.so.*$;\1;' -e 's;^lib\(boost_system.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_system.*\)\.a.*$;\1;' ` ; do + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$ax_lib" >&5 +$as_echo_n "checking for main in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_ASIO_LIB="-l$ax_lib" link_thread="yes" break +else + link_thread="no" +fi + + done + else + for ax_lib in $ax_boost_user_asio_lib $BN-$ax_boost_user_asio_lib; do + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$ax_lib" >&5 +$as_echo_n "checking for main in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+
+
+int
+main ()
+{
+return main ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ eval "$as_ac_Lib=yes"
+else
+ eval "$as_ac_Lib=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+eval ac_res=\$$as_ac_Lib
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then :
+ BOOST_ASIO_LIB="-l$ax_lib" link_asio="yes" break
+else
+ link_asio="no"
+fi
+
+ done
+
+ fi
+ if test "x$ax_lib" = "x"; then
+ as_fn_error $? "Could not find a version of the Boost::Asio library!" "$LINENO" 5
+ fi
+ if test "x$link_asio" = "xno"; then
+ as_fn_error $? "Could not link against $ax_lib !" "$LINENO" 5
+ fi
+ fi
+
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ fi
+
+
+
+# Check whether --with-boost-regex was given.
+if test "${with_boost_regex+set}" = set; then :
+ withval=$with_boost_regex;
+ if test "$withval" = "no"; then
+ want_boost="no"
+ elif test "$withval" = "yes"; then
+ want_boost="yes"
+ ax_boost_user_regex_lib=""
+ else
+ want_boost="yes"
+ ax_boost_user_regex_lib="$withval"
+ fi
+
+else
+ want_boost="yes"
+
+fi
+
+
+ if test "x$want_boost" = "xyes"; then
+
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the Boost::Regex library is available" >&5
+$as_echo_n "checking whether the Boost::Regex library is available... " >&6; }
+if ${ax_cv_boost_regex+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <boost/regex.hpp>
+
+int
+main ()
+{
+boost::regex r(); return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ax_cv_boost_regex=yes
+else
+ ax_cv_boost_regex=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_boost_regex" >&5
+$as_echo "$ax_cv_boost_regex" >&6; }
+ if test "x$ax_cv_boost_regex" = "xyes"; then
+
+$as_echo "#define HAVE_BOOST_REGEX /**/" >>confdefs.h
+
+ BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/[^\/]*//'`
+ if test "x$ax_boost_user_regex_lib" = "x"; then
+ for libextension in `ls $BOOSTLIBDIR/libboost_regex*.so* $BOOSTLIBDIR/libboost_regex*.dylib* $BOOSTLIBDIR/libboost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_regex.*\)\.so.*$;\1;' -e 's;^lib\(boost_regex.*\)\.dylib.*;\1;' -e 's;^lib\(boost_regex.*\)\.a.*$;\1;'` ; do
+ ax_lib=${libextension}
+ as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_exit" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for exit in -l$ax_lib" >&5
+$as_echo_n "checking for exit in -l$ax_lib... " >&6; }
+if eval \${$as_ac_Lib+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-l$ax_lib $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char exit (); +int +main () +{ +return exit (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_REGEX_LIB="-l$ax_lib"; link_regex="yes"; break +else + link_regex="no" +fi + + done + if test "x$link_regex" != "xyes"; then + for libextension in `ls $BOOSTLIBDIR/boost_regex*.dll* $BOOSTLIBDIR/boost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_regex.*\)\.dll.*$;\1;' -e 's;^\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_exit" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for exit in -l$ax_lib" >&5 +$as_echo_n "checking for exit in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char exit (); +int +main () +{ +return exit (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_REGEX_LIB="-l$ax_lib"; link_regex="yes"; break +else + link_regex="no" +fi + + done + fi + + else + for ax_lib in $ax_boost_user_regex_lib boost_regex-$ax_boost_user_regex_lib; do + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$ax_lib" >&5 +$as_echo_n "checking for main in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_REGEX_LIB="-l$ax_lib"; link_regex="yes"; break +else + link_regex="no" +fi + + done + fi + if test "x$ax_lib" = "x"; then + as_fn_error $? "Could not find a version of the Boost::Regex library!" "$LINENO" 5 + fi + if test "x$link_regex" != "xyes"; then + as_fn_error $? "Could not link against $ax_lib !" 
"$LINENO" 5 + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lboost_system" >&5 +$as_echo_n "checking for main in -lboost_system... " >&6; } +if ${ac_cv_lib_boost_system_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lboost_system $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_boost_system_main=yes +else + ac_cv_lib_boost_system_main=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_boost_system_main" >&5 +$as_echo "$ac_cv_lib_boost_system_main" >&6; } +if test "x$ac_cv_lib_boost_system_main" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBBOOST_SYSTEM 1 +_ACEOF + + LIBS="-lboost_system $LIBS" + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lboost_regex" >&5 +$as_echo_n "checking for main in -lboost_regex... " >&6; } +if ${ac_cv_lib_boost_regex_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lboost_regex $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_boost_regex_main=yes +else + ac_cv_lib_boost_regex_main=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_boost_regex_main" >&5 +$as_echo "$ac_cv_lib_boost_regex_main" >&6; } +if test "x$ac_cv_lib_boost_regex_main" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBBOOST_REGEX 1 +_ACEOF + + LIBS="-lboost_regex $LIBS" + +fi + + + +# Check whether --with-isa was given. +if test "${with_isa+set}" = set; then : + withval=$with_isa; +cat >>confdefs.h <<_ACEOF +#define DEFAULT_ISA "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define DEFAULT_ISA "RV64IMAFDC" +_ACEOF + +fi + + + +# Check whether --with-priv was given. +if test "${with_priv+set}" = set; then : + withval=$with_priv; +cat >>confdefs.h <<_ACEOF +#define DEFAULT_PRIV "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define DEFAULT_PRIV "MSU" +_ACEOF + +fi + + + +# Check whether --with-varch was given. +if test "${with_varch+set}" = set; then : + withval=$with_varch; +cat >>confdefs.h <<_ACEOF +#define DEFAULT_VARCH "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define DEFAULT_VARCH "vlen:128,elen:64" +_ACEOF + +fi + + + +# Check whether --with-target was given. +if test "${with_target+set}" = set; then : + withval=$with_target; +cat >>confdefs.h <<_ACEOF +#define TARGET_ARCH "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define TARGET_ARCH "riscv64-unknown-elf" +_ACEOF + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... " >&6; } +if ${ac_cv_search_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dl dld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_search_dlopen=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_dlopen+:} false; then : + break +fi +done +if ${ac_cv_search_dlopen+:} false; then : + +else + ac_cv_search_dlopen=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + + +$as_echo "#define HAVE_DLOPEN /**/" >>confdefs.h + + HAVE_DLOPEN=yes + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 +$as_echo_n "checking for pthread_create in -lpthread... " >&6; } +if ${ac_cv_lib_pthread_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_pthread_pthread_create=yes +else + ac_cv_lib_pthread_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } +if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPTHREAD 1 +_ACEOF + + LIBS="-lpthread $LIBS" + +else + as_fn_error $? "libpthread is required" "$LINENO" 5 +fi + + +# Check whether --enable-commitlog was given. +if test "${enable_commitlog+set}" = set; then : + enableval=$enable_commitlog; +fi + +if test "x$enable_commitlog" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_COMMITLOG /**/" >>confdefs.h + + +fi + +# Check whether --enable-histogram was given. +if test "${enable_histogram+set}" = set; then : + enableval=$enable_histogram; +fi + +if test "x$enable_histogram" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_HISTOGRAM /**/" >>confdefs.h + + +fi + +# Check whether --enable-dirty was given. +if test "${enable_dirty+set}" = set; then : + enableval=$enable_dirty; +fi + +if test "x$enable_dirty" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_DIRTY /**/" >>confdefs.h + + +fi + +# Check whether --enable-misaligned was given. +if test "${enable_misaligned+set}" = set; then : + enableval=$enable_misaligned; +fi + +if test "x$enable_misaligned" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_MISALIGNED /**/" >>confdefs.h + + +fi + +# Check whether --enable-dual-endian was given. 
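+# (As with --enable-commitlog, --enable-histogram, --enable-dirty and
+# --enable-misaligned above, each flag only appends a RISCV_ENABLE_*
+# define to confdefs.h.  A sketch of a typical invocation, using only
+# flags defined in this script:
+#   ./configure --enable-commitlog --enable-misaligned
+# which ends up as, e.g., "#define RISCV_ENABLE_COMMITLOG /**/" in the
+# generated config.h.)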
+if test "${enable_dual_endian+set}" = set; then : + enableval=$enable_dual_endian; +fi + +if test "x$enable_dual_endian" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_DUAL_ENDIAN /**/" >>confdefs.h + + +fi + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects disasm" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : disasm" >&5 +$as_echo "$as_me: configuring default subproject : disasm" >&6;} + ac_config_files="$ac_config_files disasm.mk:disasm/disasm.mk.in" + + enable_disasm_sproj="yes" + subprojects_enabled="$subprojects_enabled disasm" + +$as_echo "#define DISASM_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects customext" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : customext" >&5 +$as_echo "$as_me: configuring default subproject : customext" >&6;} + ac_config_files="$ac_config_files customext.mk:customext/customext.mk.in" + + enable_customext_sproj="yes" + subprojects_enabled="$subprojects_enabled customext" + +$as_echo "#define CUSTOMEXT_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects fdt" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : fdt" >&5 +$as_echo "$as_me: configuring default subproject : fdt" >&6;} + ac_config_files="$ac_config_files fdt.mk:fdt/fdt.mk.in" + + enable_fdt_sproj="yes" + subprojects_enabled="$subprojects_enabled fdt" + +$as_echo "#define FDT_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects softfloat" + + # Process the subproject appropriately. 
If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : softfloat" >&5 +$as_echo "$as_me: configuring default subproject : softfloat" >&6;} + ac_config_files="$ac_config_files softfloat.mk:softfloat/softfloat.mk.in" + + enable_softfloat_sproj="yes" + subprojects_enabled="$subprojects_enabled softfloat" + +$as_echo "#define SOFTFLOAT_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects spike_main" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : spike_main" >&5 +$as_echo "$as_me: configuring default subproject : spike_main" >&6;} + ac_config_files="$ac_config_files spike_main.mk:spike_main/spike_main.mk.in" + + enable_spike_main_sproj="yes" + subprojects_enabled="$subprojects_enabled spike_main" + +$as_echo "#define SPIKE_MAIN_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects spike_dasm" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : spike_dasm" >&5 +$as_echo "$as_me: configuring default subproject : spike_dasm" >&6;} + ac_config_files="$ac_config_files spike_dasm.mk:spike_dasm/spike_dasm.mk.in" + + enable_spike_dasm_sproj="yes" + subprojects_enabled="$subprojects_enabled spike_dasm" + +$as_echo "#define SPIKE_DASM_ENABLED /**/" >>confdefs.h + + + + + + + # Output make variables + + + + + + +#------------------------------------------------------------------------- +# MCPPBS subproject groups +#------------------------------------------------------------------------- +# If a group has the same name as a subproject then you must add the +# '**' suffix in the subproject list above. The list of subprojects in a +# group should be ordered so that subprojets only depend on those listed +# earlier. Here is an example: +# +# MCPPBS_GROUP( [group-name], [sproja,sprojb,...] 
) +# + +#------------------------------------------------------------------------- +# Output +#------------------------------------------------------------------------- + +ac_config_headers="$ac_config_headers config.h" + +ac_config_files="$ac_config_files Makefile" + +ac_config_files="$ac_config_files riscv-fesvr.pc" + +ac_config_files="$ac_config_files riscv-disasm.pc" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. 
Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. 
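+# A sketch of the resolution below, with illustrative values: run as
+# "sh config.status", $0 contains no directory separator, so each
+# directory in PATH is probed in turn:
+#   PATH=/usr/local/bin:/usr/bin sh config.status
+#   -> as_myself=/usr/local/bin/config.status, the first readable "$as_dir/$0"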
+as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? 
-eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. 
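+# (For example, it maps "config.h" onto "CONFIG_H".)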
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by RISC-V ISA Simulator $as_me ?, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +RISC-V ISA Simulator config.status ? +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. 
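+  # (Each pattern spells out the unambiguous prefixes, so abbreviated
+  # options such as --rech or --ver are accepted too.)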
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "fesvr.mk") CONFIG_FILES="$CONFIG_FILES fesvr.mk:fesvr/fesvr.mk.in" ;; + "riscv.mk") CONFIG_FILES="$CONFIG_FILES riscv.mk:riscv/riscv.mk.in" ;; + "disasm.mk") CONFIG_FILES="$CONFIG_FILES disasm.mk:disasm/disasm.mk.in" ;; + "customext.mk") CONFIG_FILES="$CONFIG_FILES customext.mk:customext/customext.mk.in" ;; + "fdt.mk") CONFIG_FILES="$CONFIG_FILES fdt.mk:fdt/fdt.mk.in" ;; + "softfloat.mk") CONFIG_FILES="$CONFIG_FILES softfloat.mk:softfloat/softfloat.mk.in" ;; + "spike_main.mk") CONFIG_FILES="$CONFIG_FILES spike_main.mk:spike_main/spike_main.mk.in" ;; + "spike_dasm.mk") CONFIG_FILES="$CONFIG_FILES spike_dasm.mk:spike_dasm/spike_dasm.mk.in" ;; + "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "riscv-fesvr.pc") CONFIG_FILES="$CONFIG_FILES riscv-fesvr.pc" ;; + "riscv-disasm.pc") CONFIG_FILES="$CONFIG_FILES riscv-disasm.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. 
+if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. 
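+  # (An unescaped "&" or "|" here would otherwise be misparsed by the
+  # s|@configure_input@|...| substitution applied to each output file:
+  # "&" re-inserts the matched text and "|" ends the replacement.)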
+ case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? 
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi + ;; + + + esac + +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. 
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
diff --git a/vendor/riscv-isa-sim/configure.ac b/vendor/riscv-isa-sim/configure.ac
new file mode 100644
index 00000000..13797a0b
--- /dev/null
+++ b/vendor/riscv-isa-sim/configure.ac
@@ -0,0 +1,126 @@
+#=========================================================================
+# Toplevel configure.ac for the Modular C++ Build System
+#=========================================================================
+# Please read the documentation in 'mcppbs-doc.txt' for more details on
+# how the Modular C++ Build System works. For most new projects, a
+# developer will only need to make the following changes:
+#
+# - change the project metadata listed right below
+# - update the list of subprojects via the 'MCPPBS_SUBPROJECTS' macro
+# - possibly add subproject groups if needed to ease configuration
+# - add more configure checks for platform specific configuration
+#
+
+#-------------------------------------------------------------------------
+# Project metadata
+#-------------------------------------------------------------------------
+
+m4_define( proj_name, [RISC-V ISA Simulator])
+m4_define( proj_maintainer, [Andrew Waterman])
+m4_define( proj_abbreviation, [spike])
+
+#-------------------------------------------------------------------------
+# Project version information
+#-------------------------------------------------------------------------
+# Version information is meant to be managed through a version control
+# system's tags and revision numbers. In a working copy the version will
+# not be defined here (you should just use the version control system's
+# mechanisms). When we make a distribution then we can set the version
+# here as formed by the scripts/vcs-version.sh script so that the
+# distribution knows what version it came from. If you are not using
+# version control then it is fine to set this directly.
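+#
+# For example, a release tarball might pin the version directly
+# (illustrative value only, not the vendored one):
+#
+#   m4_define( proj_version, [1.1.0])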
+
+m4_define( proj_version, [?])
+
+#-------------------------------------------------------------------------
+# Setup
+#-------------------------------------------------------------------------
+
+AC_INIT(proj_name,proj_version,proj_maintainer,proj_abbreviation)
+AC_LANG_CPLUSPLUS
+AC_CONFIG_SRCDIR([riscv/common.h])
+AC_CONFIG_AUX_DIR([scripts])
+AC_CANONICAL_BUILD
+AC_CANONICAL_HOST
+
+m4_include(ax_require_defined.m4)
+m4_include(ax_append_flag.m4)
+m4_include(ax_check_compile_flag.m4)
+m4_include(ax_check_link_flag.m4)
+m4_include(ax_append_link_flags.m4)
+m4_include(ax_boost_base.m4)
+m4_include(ax_boost_asio.m4)
+m4_include(ax_boost_regex.m4)
+
+#-------------------------------------------------------------------------
+# Checks for programs
+#-------------------------------------------------------------------------
+
+AC_PROG_CC
+AC_PROG_CXX
+AC_CHECK_TOOL([AR],[ar])
+AC_CHECK_TOOL([RANLIB],[ranlib])
+AC_PATH_PROG([DTC],[dtc],[no])
+AS_IF([test x"$DTC" == xno],AC_MSG_ERROR([device-tree-compiler not found]))
+AC_DEFINE_UNQUOTED(DTC, ["dtc"], [Executable name of device-tree-compiler])
+
+AC_C_BIGENDIAN
+
+#-------------------------------------------------------------------------
+# MCPPBS specific program checks
+#-------------------------------------------------------------------------
+# These macros check to see if we can do a stow-based install and also
+# check for an isa simulator suitable for running the unit test programs
+# via the makefile.
+
+MCPPBS_PROG_INSTALL
+
+#-------------------------------------------------------------------------
+# Checks for header files
+#-------------------------------------------------------------------------
+
+AC_HEADER_STDC
+
+#-------------------------------------------------------------------------
+# Checks for type
+#-------------------------------------------------------------------------
+
+AC_CHECK_TYPE([__int128_t], AC_SUBST([HAVE_INT128],[yes]))
+
+#-------------------------------------------------------------------------
+# Default compiler flags
+#-------------------------------------------------------------------------
+
+AX_APPEND_LINK_FLAGS([-Wl,--export-dynamic])
+
+AX_CHECK_COMPILE_FLAG([-relocatable-pch], AC_SUBST([HAVE_CLANG_PCH],[yes]))
+
+#-------------------------------------------------------------------------
+# MCPPBS subproject list
+#-------------------------------------------------------------------------
+# Order list so that subprojects only depend on those listed earlier.
+# The '*' suffix indicates an optional subproject. The '**' suffix
+# indicates an optional subproject which is also the name of a group.
+
+MCPPBS_SUBPROJECTS([ fesvr, riscv, disasm, customext, fdt, softfloat, spike_main, spike_dasm ])
+
+#-------------------------------------------------------------------------
+# MCPPBS subproject groups
+#-------------------------------------------------------------------------
+# If a group has the same name as a subproject then you must add the
+# '**' suffix in the subproject list above. The list of subprojects in a
+# group should be ordered so that subprojects only depend on those listed
+# earlier. Here is an example:
+#
+# MCPPBS_GROUP( [group-name], [sproja,sprojb,...] )
) +# + +#------------------------------------------------------------------------- +# Output +#------------------------------------------------------------------------- + +AC_CONFIG_HEADERS([config.h]) +AC_CONFIG_FILES([Makefile]) +AC_CONFIG_FILES([riscv-fesvr.pc]) +AC_CONFIG_FILES([riscv-disasm.pc]) +AC_OUTPUT diff --git a/vendor/riscv-isa-sim/customext/cflush.cc b/vendor/riscv-isa-sim/customext/cflush.cc new file mode 100644 index 00000000..1a5cfa2d --- /dev/null +++ b/vendor/riscv-isa-sim/customext/cflush.cc @@ -0,0 +1,42 @@ +#include "insn_macros.h" +#include "extension.h" +#include + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rs1()]; + } +} xrs1; + +static reg_t custom_cflush(processor_t* p, insn_t insn, reg_t pc) +{ + require_privilege(PRV_M); + + return pc + 4; \ +} + +class cflush_t : public extension_t +{ + public: + const char* name() { return "cflush"; } + + cflush_t() {} + + std::vector get_instructions() { + std::vector insns; + insns.push_back((insn_desc_t){true, 0xFC000073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){true, 0xFC200073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){true, 0xFC100073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + return insns; + } + + std::vector get_disasms() { + std::vector insns; + insns.push_back(new disasm_insn_t("cflush.d.l1", 0xFC000073, 0xFFF07FFF, {&xrs1})); + insns.push_back(new disasm_insn_t("cdiscard.d.l1", 0xFC200073, 0xFFF07FFF, {&xrs1})); + insns.push_back(new disasm_insn_t("cflush.i.l1", 0xFC100073, 0xFFF07FFF, {&xrs1})); + return insns; + } +}; + +REGISTER_EXTENSION(cflush, []() { return new cflush_t; }) diff --git a/vendor/riscv-isa-sim/customext/customext.ac b/vendor/riscv-isa-sim/customext/customext.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/customext/customext.mk.in b/vendor/riscv-isa-sim/customext/customext.mk.in new file mode 100644 index 00000000..a14e771c --- /dev/null +++ b/vendor/riscv-isa-sim/customext/customext.mk.in @@ -0,0 +1,11 @@ +customext_subproject_deps = \ + spike_main \ + riscv \ + disasm \ + softfloat \ + +customext_srcs = \ + dummy_rocc.cc \ + cflush.cc \ + +customext_install_shared_lib = yes diff --git a/vendor/riscv-isa-sim/customext/dummy_rocc.cc b/vendor/riscv-isa-sim/customext/dummy_rocc.cc new file mode 100644 index 00000000..85ab7aa6 --- /dev/null +++ b/vendor/riscv-isa-sim/customext/dummy_rocc.cc @@ -0,0 +1,47 @@ +#include "rocc.h" +#include "mmu.h" +#include + +class dummy_rocc_t : public rocc_t +{ + public: + const char* name() { return "dummy_rocc"; } + + reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t xs2) + { + reg_t prev_acc = acc[insn.rs2]; + + if (insn.rs2 >= num_acc) + illegal_instruction(); + + switch (insn.funct) + { + case 0: // acc <- xs1 + acc[insn.rs2] = xs1; + break; + case 1: // xd <- acc (the only real work is the return statement below) + break; + case 2: // acc[rs2] <- Mem[xs1] + acc[insn.rs2] = p->get_mmu()->load_uint64(xs1); + break; + case 3: // acc[rs2] <- accX + xs1 + acc[insn.rs2] += xs1; + break; + default: + illegal_instruction(); + } + + return prev_acc; // in all cases, xd <- previous value of acc[rs2] + } + + dummy_rocc_t() + { + memset(acc, 0, sizeof(acc)); + } + + private: + static const int num_acc = 4; + reg_t acc[num_acc]; +}; + +REGISTER_EXTENSION(dummy_rocc, []() { return new dummy_rocc_t; }) diff --git 
diff --git a/vendor/riscv-isa-sim/customext/dummy_rocc_test.c b/vendor/riscv-isa-sim/customext/dummy_rocc_test.c
new file mode 100644
index 00000000..94de8c04
--- /dev/null
+++ b/vendor/riscv-isa-sim/customext/dummy_rocc_test.c
@@ -0,0 +1,29 @@
+// The following is a RISC-V program to test the functionality of the
+// dummy RoCC accelerator.
+// Compile with riscv64-unknown-elf-gcc dummy_rocc_test.c
+// Run with spike --extension=dummy_rocc pk a.out
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+int main() {
+  uint64_t x = 123, y = 456, z = 0;
+  // load x into accumulator 2 (funct=0)
+  asm volatile ("custom0 x0, %0, 2, 0" : : "r"(x));
+  // read it back into z (funct=1) to verify it
+  asm volatile ("custom0 %0, x0, 2, 1" : "=r"(z));
+  assert(z == x);
+  // accumulate 456 into it (funct=3)
+  asm volatile ("custom0 x0, %0, 2, 3" : : "r"(y));
+  // verify it
+  asm volatile ("custom0 %0, x0, 2, 1" : "=r"(z));
+  assert(z == x+y);
+  // do it all again, but initialize acc2 via memory this time (funct=2)
+  asm volatile ("custom0 x0, %0, 2, 2" : : "r"(&x));
+  asm volatile ("custom0 x0, %0, 2, 3" : : "r"(y));
+  asm volatile ("custom0 %0, x0, 2, 1" : "=r"(z));
+  assert(z == x+y);
+
+  printf("success!\n");
+}
diff --git a/vendor/riscv-isa-sim/debug_rom/.gitignore b/vendor/riscv-isa-sim/debug_rom/.gitignore
new file mode 100644
index 00000000..98bd13e4
--- /dev/null
+++ b/vendor/riscv-isa-sim/debug_rom/.gitignore
@@ -0,0 +1,5 @@
+/debug_rom
+/debug_rom32
+/debug_rom64
+/debug_rom32.h
+/debug_rom64.h
diff --git a/vendor/riscv-isa-sim/debug_rom/Makefile b/vendor/riscv-isa-sim/debug_rom/Makefile
new file mode 100644
index 00000000..c5f2205d
--- /dev/null
+++ b/vendor/riscv-isa-sim/debug_rom/Makefile
@@ -0,0 +1,24 @@
+# Recursive make is bad, but in this case we're cross compiling which is a
+# pretty unusual use case.
+
+CC = $(RISCV)/bin/riscv64-unknown-elf-gcc
+OBJCOPY = $(RISCV)/bin/riscv64-unknown-elf-objcopy
+
+COMPILE = $(CC) -nostdlib -nostartfiles -I.. -Tlink.ld
+
+ELFS = debug_rom
+DEPS = debug_rom.S link.ld ../riscv/debug_rom_defines.h ../riscv/encoding.h
+
+all: $(patsubst %,%.h,$(ELFS))
+
+%.h: %.raw
+	xxd -i $^ | sed "s/^unsigned/static const unsigned/" > $@
+
+%.raw: %
+	$(OBJCOPY) -O binary --only-section .text $^ $@
+
+debug_rom: $(DEPS)
+	$(COMPILE) -o $@ $^
+
+clean:
+	rm -f $(ELFS) debug_rom*.raw debug_rom.h
diff --git a/vendor/riscv-isa-sim/debug_rom/debug_rom.S b/vendor/riscv-isa-sim/debug_rom/debug_rom.S
new file mode 100755
index 00000000..8d8e4cd0
--- /dev/null
+++ b/vendor/riscv-isa-sim/debug_rom/debug_rom.S
@@ -0,0 +1,79 @@
+// See LICENSE.SiFive for license details.
+
+#include "riscv/encoding.h"
+#include "riscv/debug_rom_defines.h"
+
+        .option norvc
+        .global entry
+        .global exception
+
+        // Entry location on ebreak, Halt, or Breakpoint
+        // It is the same for all harts. They branch when
+        // their GO or RESUME bit is set.
+
+entry:
+        jal zero, _entry
+resume:
+        // Not used.
+        jal zero, _resume
+exception:
+        jal zero, _exception
+
+_entry:
+        // This fence is required because the execution may have written something
+        // into the Abstract Data or Program Buffer registers.
+        fence
+        csrw CSR_DSCRATCH, s0       // Save s0 to allow signaling MHARTID
+
+        // We continue to let the debug module know that we are halted, so that
+        // a DM which was reset is still made aware that a hart is halted.
+        // We keep checking both whether there is something the debugger wants
+        // us to do, or whether we should resume.
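+        // A sketch of the handshake implemented below (flag layout per
+        // debug_rom_defines.h; each hart owns one flag byte at
+        // DEBUG_ROM_FLAGS + MHARTID): the loop (1) stores MHARTID to
+        // DEBUG_ROM_HALTED to tell the DM this hart is halted, (2) branches
+        // to "going" when its GO bit is set, (3) branches to "_resume" when
+        // its RESUME bit is set, and otherwise waits in wfi and polls again.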
+entry_loop: + csrr s0, CSR_MHARTID + sw s0, DEBUG_ROM_HALTED(zero) + lbu s0, DEBUG_ROM_FLAGS(s0) // 1 byte flag per hart. Only one hart advances here. + andi s0, s0, (1 << DEBUG_ROM_FLAG_GO) + bnez s0, going + csrr s0, CSR_MHARTID + lbu s0, DEBUG_ROM_FLAGS(s0) // multiple harts can resume here + andi s0, s0, (1 << DEBUG_ROM_FLAG_RESUME) + bnez s0, _resume + wfi + jal zero, entry_loop + +_exception: + // Restore S0, which we always save to dscratch. + // We need this in case the user tried an abstract write to a + // non-existent CSR. + csrr s0, CSR_DSCRATCH + sw zero, DEBUG_ROM_EXCEPTION(zero) // Let debug module know you got an exception. + ebreak + +going: + csrr s0, CSR_MHARTID + sw s0, DEBUG_ROM_GOING(zero) // When debug module sees this write, the GO flag is reset. + csrr s0, CSR_DSCRATCH // Restore s0 here + fence + fence.i + jalr zero, zero, %lo(whereto) // Debug module will put different instructions and data in the RAM, + // so we use fence and fence.i for safety. (rocket-chip doesn't have this + // because jalr is special there) + +_resume: + csrr s0, CSR_MHARTID + sw s0, DEBUG_ROM_RESUMING(zero) // When Debug Module sees this write, the RESUME flag is reset. + csrr s0, CSR_DSCRATCH // Restore s0 + dret + + // END OF ACTUAL "ROM" CONTENTS. BELOW IS JUST FOR LINKER SCRIPT. + +.section .whereto +whereto: + nop + // Variable "ROM" This is : jal x0 abstract, jal x0 program_buffer, + // or jal x0 resume, as desired. + // Debug Module state machine tracks what is 'desired'. + // We don't need/want to use jalr here because all of the + // Variable ROM contents are set by + // Debug Module before setting the OK_GO byte. diff --git a/vendor/riscv-isa-sim/debug_rom/debug_rom.h b/vendor/riscv-isa-sim/debug_rom/debug_rom.h new file mode 100644 index 00000000..7edd5f68 --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/debug_rom.h @@ -0,0 +1,13 @@ +static const unsigned char debug_rom_raw[] = { + 0x6f, 0x00, 0xc0, 0x00, 0x6f, 0x00, 0x00, 0x06, 0x6f, 0x00, 0x80, 0x03, + 0x0f, 0x00, 0xf0, 0x0f, 0x73, 0x10, 0x24, 0x7b, 0x73, 0x24, 0x40, 0xf1, + 0x23, 0x20, 0x80, 0x10, 0x03, 0x44, 0x04, 0x40, 0x13, 0x74, 0x14, 0x00, + 0x63, 0x14, 0x04, 0x02, 0x73, 0x24, 0x40, 0xf1, 0x03, 0x44, 0x04, 0x40, + 0x13, 0x74, 0x24, 0x00, 0x63, 0x18, 0x04, 0x02, 0x73, 0x00, 0x50, 0x10, + 0x6f, 0xf0, 0x9f, 0xfd, 0x73, 0x24, 0x20, 0x7b, 0x23, 0x26, 0x00, 0x10, + 0x73, 0x00, 0x10, 0x00, 0x73, 0x24, 0x40, 0xf1, 0x23, 0x22, 0x80, 0x10, + 0x73, 0x24, 0x20, 0x7b, 0x0f, 0x00, 0xf0, 0x0f, 0x0f, 0x10, 0x00, 0x00, + 0x67, 0x00, 0x00, 0x30, 0x73, 0x24, 0x40, 0xf1, 0x23, 0x24, 0x80, 0x10, + 0x73, 0x24, 0x20, 0x7b, 0x73, 0x00, 0x20, 0x7b +}; +static const unsigned int debug_rom_raw_len = 116; diff --git a/vendor/riscv-isa-sim/debug_rom/link.ld b/vendor/riscv-isa-sim/debug_rom/link.ld new file mode 100644 index 00000000..897c42da --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/link.ld @@ -0,0 +1,15 @@ +OUTPUT_ARCH( "riscv" ) +ENTRY( entry ) +SECTIONS +{ + .whereto 0x300 : + { + *(.whereto) + } + . = 0x800; + .text : + { + *(.text) + } + _end = .; +} diff --git a/vendor/riscv-isa-sim/disasm/disasm.ac b/vendor/riscv-isa-sim/disasm/disasm.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/disasm/disasm.cc b/vendor/riscv-isa-sim/disasm/disasm.cc new file mode 100644 index 00000000..d18f0892 --- /dev/null +++ b/vendor/riscv-isa-sim/disasm/disasm.cc @@ -0,0 +1,2147 @@ +// See LICENSE for license details. 
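+//
+// Each anonymous "struct : public arg_t" below renders one operand kind
+// (register name, immediate, address, ...) as a string; add_instructions()
+// further down pairs each opcode's match/mask with the operand formatters
+// that apply to it, yielding renderings such as "lw a0, 8(sp)" (an
+// illustrative example, not generated output).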
+
+#include "disasm.h"
+#include <cassert>
+#include <string>
+#include <vector>
+#include <cstdarg>
+#include <sstream>
+#include <stdlib.h>
+
+// Indicates that the next arg (only) is optional.
+// If the result of converting the next arg to a string is ""
+// then it will not be printed.
+static const arg_t* opt = nullptr;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.i_imm()) + '(' + xpr_name[insn.rs1()] + ')';
+  }
+} load_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.s_imm()) + '(' + xpr_name[insn.rs1()] + ')';
+  }
+} store_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::string("(") + xpr_name[insn.rs1()] + ')';
+  }
+} base_only_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rd()];
+  }
+} xrd;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rs1()];
+  }
+} xrs1;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rs2()];
+  }
+} xrs2;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rs3()];
+  }
+} xrs3;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return fpr_name[insn.rd()];
+  }
+} frd;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return fpr_name[insn.rs1()];
+  }
+} frs1;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return fpr_name[insn.rs2()];
+  }
+} frs2;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return fpr_name[insn.rs3()];
+  }
+} frs3;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    switch (insn.csr())
+    {
+      #define DECLARE_CSR(name, num) case num: return #name;
+      #include "encoding.h"
+      #undef DECLARE_CSR
+      default:
+      {
+        char buf[16];
+        snprintf(buf, sizeof buf, "unknown_%03" PRIx64, insn.csr());
+        return std::string(buf);
+      }
+    }
+  }
+} csr;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.i_imm());
+  }
+} imm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.shamt());
+  }
+} shamt;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    std::stringstream s;
+    s << std::hex << "0x" << ((uint32_t)insn.u_imm() >> 12);
+    return s.str();
+  }
+} bigimm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string(insn.rs1());
+  }
+} zimm5;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    int32_t target = insn.sb_imm();
+    std::string s = target >= 0 ? "pc + " : "pc - ";
+    s += std::to_string(abs(target));
+    return s;
+  }
+} branch_target;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    std::stringstream s;
+    int32_t target = insn.uj_imm();
+    char sign = target >= 0 ? '+' : '-';
+    s << "pc " << sign << std::hex << " 0x" << abs(target);
+    return s.str();
+  }
+} jump_target;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rvc_rs1()];
+  }
+} rvc_rs1;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rvc_rs2()];
+  }
+} rvc_rs2;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return fpr_name[insn.rvc_rs2()];
+  }
+} rvc_fp_rs2;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rvc_rs1s()];
+  }
+} rvc_rs1s;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[insn.rvc_rs2s()];
+  }
+} rvc_rs2s;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return fpr_name[insn.rvc_rs2s()];
+  }
+} rvc_fp_rs2s;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return xpr_name[X_SP];
+  }
+} rvc_sp;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_imm());
+  }
+} rvc_imm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_addi4spn_imm());
+  }
+} rvc_addi4spn_imm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_addi16sp_imm());
+  }
+} rvc_addi16sp_imm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_lwsp_imm());
+  }
+} rvc_lwsp_imm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)(insn.rvc_imm() & 0x3f));
+  }
+} rvc_shamt;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    std::stringstream s;
+    s << std::hex << "0x" << ((uint32_t)insn.rvc_imm() << 12 >> 12);
+    return s.str();
+  }
+} rvc_uimm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_lwsp_imm()) + '(' + xpr_name[X_SP] + ')';
+  }
+} rvc_lwsp_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_ldsp_imm()) + '(' + xpr_name[X_SP] + ')';
+  }
+} rvc_ldsp_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_swsp_imm()) + '(' + xpr_name[X_SP] + ')';
+  }
+} rvc_swsp_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_sdsp_imm()) + '(' + xpr_name[X_SP] + ')';
+  }
+} rvc_sdsp_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_lw_imm()) + '(' + xpr_name[insn.rvc_rs1s()] + ')';
+  }
+} rvc_lw_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rvc_ld_imm()) + '(' + xpr_name[insn.rvc_rs1s()] + ')';
+  }
+} rvc_ld_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    int32_t target = insn.rvc_b_imm();
+    std::string s = target >= 0 ? "pc + " : "pc - ";
+    s += std::to_string(abs(target));
+    return s;
+  }
+} rvc_branch_target;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    int32_t target = insn.rvc_j_imm();
+    std::string s = target >= 0 ? "pc + " : "pc - ";
+    s += std::to_string(abs(target));
+    return s;
+  }
+} rvc_jump_target;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::string("(") + xpr_name[insn.rs1()] + ')';
+  }
+} v_address;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return vr_name[insn.rd()];
+  }
+} vd;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return vr_name[insn.rs1()];
+  }
+} vs1;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return vr_name[insn.rs2()];
+  }
+} vs2;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return vr_name[insn.rd()];
+  }
+} vs3;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return insn.v_vm() ? "" : "v0.t";
+  }
+} vm;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return "v0";
+  }
+} v0;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.v_simm5());
+  }
+} v_simm5;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    std::stringstream s;
+    int sew = insn.v_sew();
+    int lmul = insn.v_lmul();
+    auto vta = insn.v_vta() == 1 ? "ta" : "tu";
+    auto vma = insn.v_vma() == 1 ? "ma" : "mu";
+    s << "e" << sew;
+    if (insn.v_frac_lmul()) {
+      std::string lmul_str = "";
+      switch (lmul) {
+        case 3:
+          lmul_str = "f2";
+          break;
+        case 2:
+          lmul_str = "f4";
+          break;
+        case 1:
+          lmul_str = "f8";
+          break;
+        default:
+          assert(false && "unsupported fractional LMUL");
+      }
+      s << ", m" << lmul_str;
+    } else {
+      s << ", m" << (1 << lmul);
+    }
+    s << ", " << vta << ", " << vma;
+    return s.str();
+  }
+} v_vtype;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return "x0";
+  }
+} x0;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    std::string s;
+    auto iorw = insn.iorw();
+    bool has_pre = false;
+    static const char type[] = "wroi";
+    for (int i = 7; i >= 4; --i) {
+      if (iorw & (1ul << i)) {
+        s += type[i - 4];
+        has_pre = true;
+      }
+    }
+
+    s += (has_pre ? "," : "");
+    for (int i = 3; i >= 0; --i) {
+      if (iorw & (1ul << i)) {
+        s += type[i];
+      }
+    }
+
+    return s;
+  }
+} iorw;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.p_imm2());
+  }
+} p_imm2;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.p_imm3());
+  }
+} p_imm3;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.p_imm4());
+  }
+} p_imm4;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.p_imm5());
+  }
+} p_imm5;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.p_imm6());
+  }
+} p_imm6;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.bs());
+  }
+} bs;
+
+struct : public arg_t {
+  std::string to_string(insn_t insn) const {
+    return std::to_string((int)insn.rcon());
+  }
+} rcon;
+
+typedef struct {
+  reg_t match;
+  reg_t mask;
+  const char *fmt;
+  std::vector<const arg_t*>& arg;
+} custom_fmt_t;
+
+std::string disassembler_t::disassemble(insn_t insn) const
+{
+  const disasm_insn_t* disasm_insn = lookup(insn);
+  return disasm_insn ?
disasm_insn->to_string(insn) : "unknown"; +} + +static void NOINLINE add_noarg_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {})); +} + +static void NOINLINE add_rtype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &xrs2})); +} + +static void NOINLINE add_r1type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1})); +} + +static void NOINLINE add_r3type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &xrs2, &xrs3})); +} + +static void NOINLINE add_itype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &imm})); +} + +static void NOINLINE add_itype_shift_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &shamt})); +} + +static void NOINLINE add_xload_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &load_address})); +} + +static void NOINLINE add_xstore_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs2, &store_address})); +} + +static void NOINLINE add_fload_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &load_address})); +} + +static void NOINLINE add_fstore_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frs2, &store_address})); +} + +static void NOINLINE add_xamo_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs2, &base_only_address})); +} + +static void NOINLINE add_xlr_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &base_only_address})); +} + +static void NOINLINE add_xst_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs2, &base_only_address})); +} + +static void NOINLINE add_btype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs1, &xrs2, &branch_target})); +} + +static void NOINLINE add_b1type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + const uint32_t mask_rs2 = 0x1fUL << 20; + d->add_insn(new disasm_insn_t(name, match, mask | mask_rs2, {&xrs1, &branch_target})); +} + +static void NOINLINE add_frtype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &frs1, &frs2})); +} + +static void NOINLINE add_fr1type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &frs1})); +} + +static void NOINLINE add_fr3type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &frs1, &frs2, &frs3})); +} + +static void NOINLINE 
add_fxtype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &frs1})); +} + +static void NOINLINE add_xftype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &xrs1})); +} + +static void NOINLINE add_fx2type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &frs1, &frs2})); +} + +static void NOINLINE add_sfence_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs1, &xrs2})); +} + +static void NOINLINE add_pitype3_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm3})); +} + +static void NOINLINE add_pitype4_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm4})); +} + +static void NOINLINE add_pitype5_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm5})); +} + +static void NOINLINE add_pitype6_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm6})); +} + +static void NOINLINE add_vector_v_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, opt, &vm})); +} + +static void NOINLINE add_vector_vv_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &vs1, opt, &vm})); +} + +static void NOINLINE add_vector_vx_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &xrs1, opt, &vm})); +} + +static void NOINLINE add_vector_vf_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &frs1, opt, &vm})); +} + +static void NOINLINE add_vector_vi_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &v_simm5, opt, &vm})); +} + +static void NOINLINE add_vector_viu_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &zimm5, opt, &vm})); +} + +static void NOINLINE add_vector_vvm_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &vs1, &v0})); +} + +static void NOINLINE add_vector_vxm_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &xrs1, &v0})); +} + +static void NOINLINE add_vector_vim_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &v_simm5, &v0})); +} + +static void NOINLINE add_unknown_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + std::string s = name; + s += " (args unknown)"; + + d->add_insn(new disasm_insn_t(s.c_str(), match, mask, {})); +} + + +static void NOINLINE 
add_unknown_insns(disassembler_t* d) +{ + // provide a default disassembly for all instructions as a fallback + #define DECLARE_INSN(code, match, mask) \ + add_unknown_insn(d, #code, match, mask); + #include "encoding.h" + #undef DECLARE_INSN +} + +void disassembler_t::add_instructions(const isa_parser_t* isa) +{ + const uint32_t mask_rd = 0x1fUL << 7; + const uint32_t match_rd_ra = 1UL << 7; + const uint32_t mask_rs1 = 0x1fUL << 15; + const uint32_t match_rs1_ra = 1UL << 15; + const uint32_t mask_rs2 = 0x1fUL << 20; + const uint32_t mask_imm = 0xfffUL << 20; + const uint32_t imm_shift = 20; + const uint32_t mask_rvc_rs2 = 0x1fUL << 2; + const uint32_t mask_rvc_imm = mask_rvc_rs2 | 0x1000UL; + const uint32_t mask_nf = 0x7Ul << 29; + const uint32_t mask_wd = 0x1Ul << 26; + const uint32_t mask_vm = 0x1Ul << 25; + const uint32_t mask_vldst = 0x7Ul << 12 | 0x1UL << 28; + const uint32_t mask_amoop = 0x1fUl << 27; + const uint32_t mask_width = 0x7Ul << 12; + + #define DECLARE_INSN(code, match, mask) \ + const uint32_t match_##code = match; \ + const uint32_t mask_##code = mask; + #include "encoding.h" + #undef DECLARE_INSN + + // explicit per-instruction disassembly + #define DISASM_INSN(name, code, extra, ...) \ + add_insn(new disasm_insn_t(name, match_##code, mask_##code | (extra), __VA_ARGS__)); + #define DEFINE_NOARG(code) add_noarg_insn(this, #code, match_##code, mask_##code); + #define DEFINE_RTYPE(code) add_rtype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_R1TYPE(code) add_r1type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_R3TYPE(code) add_r3type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_ITYPE(code) add_itype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_ITYPE_SHIFT(code) add_itype_shift_insn(this, #code, match_##code, mask_##code); + #define DEFINE_I0TYPE(name, code) DISASM_INSN(name, code, mask_rs1, {&xrd, &imm}) + #define DEFINE_I1TYPE(name, code) DISASM_INSN(name, code, mask_imm, {&xrd, &xrs1}) + #define DEFINE_I2TYPE(name, code) DISASM_INSN(name, code, mask_rd | mask_imm, {&xrs1}) + #define DEFINE_LTYPE(code) DISASM_INSN(#code, code, 0, {&xrd, &bigimm}) + #define DEFINE_BTYPE(code) add_btype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_B1TYPE(name, code) add_b1type_insn(this, name, match_##code, mask_##code); + #define DEFINE_XLOAD(code) add_xload_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XSTORE(code) add_xstore_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XAMO(code) add_xamo_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XLOAD_BASE(code) add_xlr_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XSTORE_BASE(code) add_xst_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FLOAD(code) add_fload_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FSTORE(code) add_fstore_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FRTYPE(code) add_frtype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FR1TYPE(code) add_fr1type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FR3TYPE(code) add_fr3type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FXTYPE(code) add_fxtype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FX2TYPE(code) add_fx2type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XFTYPE(code) add_xftype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_SFENCE_TYPE(code) add_sfence_insn(this, #code, 
match_##code, mask_##code); + + add_insn(new disasm_insn_t("unimp", match_csrrw|(CSR_CYCLE<<20), 0xffffffff, {})); + add_insn(new disasm_insn_t("c.unimp", 0, 0xffff, {})); + + DEFINE_XLOAD(lb) + DEFINE_XLOAD(lbu) + DEFINE_XLOAD(lh) + DEFINE_XLOAD(lhu) + DEFINE_XLOAD(lw) + DEFINE_XLOAD(lwu) + DEFINE_XLOAD(ld) + + DEFINE_XSTORE(sb) + DEFINE_XSTORE(sh) + DEFINE_XSTORE(sw) + DEFINE_XSTORE(sd) + + if (isa->extension_enabled('A')) { + DEFINE_XAMO(amoadd_w) + DEFINE_XAMO(amoswap_w) + DEFINE_XAMO(amoand_w) + DEFINE_XAMO(amoor_w) + DEFINE_XAMO(amoxor_w) + DEFINE_XAMO(amomin_w) + DEFINE_XAMO(amomax_w) + DEFINE_XAMO(amominu_w) + DEFINE_XAMO(amomaxu_w) + DEFINE_XAMO(amoadd_d) + DEFINE_XAMO(amoswap_d) + DEFINE_XAMO(amoand_d) + DEFINE_XAMO(amoor_d) + DEFINE_XAMO(amoxor_d) + DEFINE_XAMO(amomin_d) + DEFINE_XAMO(amomax_d) + DEFINE_XAMO(amominu_d) + DEFINE_XAMO(amomaxu_d) + DEFINE_XLOAD_BASE(lr_w) + DEFINE_XAMO(sc_w) + DEFINE_XLOAD_BASE(lr_d) + DEFINE_XAMO(sc_d) + } + + add_insn(new disasm_insn_t("j", match_jal, mask_jal | mask_rd, {&jump_target})); + add_insn(new disasm_insn_t("jal", match_jal | match_rd_ra, mask_jal | mask_rd, {&jump_target})); + add_insn(new disasm_insn_t("jal", match_jal, mask_jal, {&xrd, &jump_target})); + + DEFINE_B1TYPE("beqz", beq); + DEFINE_B1TYPE("bnez", bne); + DEFINE_B1TYPE("bltz", blt); + DEFINE_B1TYPE("bgez", bge); + DEFINE_BTYPE(beq) + DEFINE_BTYPE(bne) + DEFINE_BTYPE(blt) + DEFINE_BTYPE(bge) + DEFINE_BTYPE(bltu) + DEFINE_BTYPE(bgeu) + + DEFINE_LTYPE(lui); + DEFINE_LTYPE(auipc); + + add_insn(new disasm_insn_t("ret", match_jalr | match_rs1_ra, mask_jalr | mask_rd | mask_rs1 | mask_imm, {})); + DEFINE_I2TYPE("jr", jalr); + add_insn(new disasm_insn_t("jalr", match_jalr | match_rd_ra, mask_jalr | mask_rd | mask_imm, {&xrs1})); + DEFINE_ITYPE(jalr); + + add_noarg_insn(this, "nop", match_addi, mask_addi | mask_rd | mask_rs1 | mask_imm); + DEFINE_I0TYPE("li", addi); + DEFINE_I1TYPE("mv", addi); + DEFINE_ITYPE(addi); + DEFINE_ITYPE(slti); + add_insn(new disasm_insn_t("seqz", match_sltiu | (1 << imm_shift), mask_sltiu | mask_imm, {&xrd, &xrs1})); + DEFINE_ITYPE(sltiu); + add_insn(new disasm_insn_t("not", match_xori | mask_imm, mask_xori | mask_imm, {&xrd, &xrs1})); + DEFINE_ITYPE(xori); + + DEFINE_ITYPE_SHIFT(slli); + DEFINE_ITYPE_SHIFT(srli); + DEFINE_ITYPE_SHIFT(srai); + + DEFINE_ITYPE(ori); + DEFINE_ITYPE(andi); + DEFINE_I1TYPE("sext.w", addiw); + DEFINE_ITYPE(addiw); + + DEFINE_ITYPE_SHIFT(slliw); + DEFINE_ITYPE_SHIFT(srliw); + DEFINE_ITYPE_SHIFT(sraiw); + + DEFINE_RTYPE(add); + DEFINE_RTYPE(sub); + DEFINE_RTYPE(sll); + DEFINE_RTYPE(slt); + add_insn(new disasm_insn_t("snez", match_sltu, mask_sltu | mask_rs1, {&xrd, &xrs2})); + DEFINE_RTYPE(sltu); + DEFINE_RTYPE(xor); + DEFINE_RTYPE(srl); + DEFINE_RTYPE(sra); + DEFINE_RTYPE(or); + DEFINE_RTYPE(and); + DEFINE_RTYPE(addw); + DEFINE_RTYPE(subw); + DEFINE_RTYPE(sllw); + DEFINE_RTYPE(srlw); + DEFINE_RTYPE(sraw); + + DEFINE_NOARG(ecall); + DEFINE_NOARG(ebreak); + DEFINE_NOARG(mret); + DEFINE_NOARG(dret); + DEFINE_NOARG(wfi); + add_insn(new disasm_insn_t("fence", match_fence, mask_fence, {&iorw})); + DEFINE_NOARG(fence_i); + + add_insn(new disasm_insn_t("csrr", match_csrrs, mask_csrrs | mask_rs1, {&xrd, &csr})); + add_insn(new disasm_insn_t("csrw", match_csrrw, mask_csrrw | mask_rd, {&csr, &xrs1})); + add_insn(new disasm_insn_t("csrs", match_csrrs, mask_csrrs | mask_rd, {&csr, &xrs1})); + add_insn(new disasm_insn_t("csrc", match_csrrc, mask_csrrc | mask_rd, {&csr, &xrs1})); + add_insn(new disasm_insn_t("csrwi", match_csrrwi, mask_csrrwi | 
mask_rd, {&csr, &zimm5})); + add_insn(new disasm_insn_t("csrsi", match_csrrsi, mask_csrrsi | mask_rd, {&csr, &zimm5})); + add_insn(new disasm_insn_t("csrci", match_csrrci, mask_csrrci | mask_rd, {&csr, &zimm5})); + add_insn(new disasm_insn_t("csrrw", match_csrrw, mask_csrrw, {&xrd, &csr, &xrs1})); + add_insn(new disasm_insn_t("csrrs", match_csrrs, mask_csrrs, {&xrd, &csr, &xrs1})); + add_insn(new disasm_insn_t("csrrc", match_csrrc, mask_csrrc, {&xrd, &csr, &xrs1})); + add_insn(new disasm_insn_t("csrrwi", match_csrrwi, mask_csrrwi, {&xrd, &csr, &zimm5})); + add_insn(new disasm_insn_t("csrrsi", match_csrrsi, mask_csrrsi, {&xrd, &csr, &zimm5})); + add_insn(new disasm_insn_t("csrrci", match_csrrci, mask_csrrci, {&xrd, &csr, &zimm5})); + + if (isa->extension_enabled('S')) { + DEFINE_NOARG(sret); + DEFINE_SFENCE_TYPE(sfence_vma); + } + + if (isa->extension_enabled('M')) { + DEFINE_RTYPE(mul); + DEFINE_RTYPE(mulh); + DEFINE_RTYPE(mulhu); + DEFINE_RTYPE(mulhsu); + DEFINE_RTYPE(mulw); + DEFINE_RTYPE(div); + DEFINE_RTYPE(divu); + DEFINE_RTYPE(rem); + DEFINE_RTYPE(remu); + DEFINE_RTYPE(divw); + DEFINE_RTYPE(divuw); + DEFINE_RTYPE(remw); + DEFINE_RTYPE(remuw); + } + + if (isa->extension_enabled(EXT_ZBA)) { + DEFINE_RTYPE(sh1add); + DEFINE_RTYPE(sh2add); + DEFINE_RTYPE(sh3add); + if (isa->get_max_xlen() == 64) { + DEFINE_ITYPE_SHIFT(slli_uw); + add_insn(new disasm_insn_t("zext.w", match_add_uw, mask_add_uw | mask_rs2, {&xrd, &xrs1})); + DEFINE_RTYPE(add_uw); + DEFINE_RTYPE(sh1add_uw); + DEFINE_RTYPE(sh2add_uw); + DEFINE_RTYPE(sh3add_uw); + } + } + + if (isa->extension_enabled(EXT_ZBB)) { + DEFINE_RTYPE(ror); + DEFINE_RTYPE(rol); + DEFINE_ITYPE_SHIFT(rori); + DEFINE_R1TYPE(ctz); + DEFINE_R1TYPE(clz); + DEFINE_R1TYPE(cpop); + DEFINE_RTYPE(min); + DEFINE_RTYPE(minu); + DEFINE_RTYPE(max); + DEFINE_RTYPE(maxu); + DEFINE_RTYPE(andn); + DEFINE_RTYPE(orn); + DEFINE_RTYPE(xnor); + DEFINE_R1TYPE(sext_b); + DEFINE_R1TYPE(sext_h); + add_insn(new disasm_insn_t("rev8", match_grevi | ((isa->get_max_xlen() - 8) << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + add_insn(new disasm_insn_t("orc.b", match_gorci | (0x7 << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + add_insn(new disasm_insn_t("zext.h", (isa->get_max_xlen() == 32 ? 
match_pack : match_packw), mask_pack | mask_rs2, {&xrd, &xrs1})); + if (isa->get_max_xlen() == 64) { + DEFINE_RTYPE(rorw); + DEFINE_RTYPE(rolw); + DEFINE_ITYPE_SHIFT(roriw); + DEFINE_R1TYPE(ctzw); + DEFINE_R1TYPE(clzw); + DEFINE_R1TYPE(cpopw); + } + } + + if (isa->extension_enabled(EXT_ZBS)) { + DEFINE_RTYPE(bclr); + DEFINE_RTYPE(binv); + DEFINE_RTYPE(bset); + DEFINE_RTYPE(bext); + DEFINE_ITYPE_SHIFT(bclri); + DEFINE_ITYPE_SHIFT(binvi); + DEFINE_ITYPE_SHIFT(bseti); + DEFINE_ITYPE_SHIFT(bexti); + } + + if (isa->extension_enabled(EXT_ZBKB)) { + add_insn(new disasm_insn_t("brev8", match_grevi | (0x7 << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); // brev8 + add_insn(new disasm_insn_t("rev8", match_grevi | ((isa->get_max_xlen() - 8) << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + DEFINE_RTYPE(pack); + DEFINE_RTYPE(packh); + if (isa->get_max_xlen() == 64) { + DEFINE_RTYPE(packw); + } + } + + if (isa->extension_enabled(EXT_SVINVAL)) { + DEFINE_NOARG(sfence_w_inval); + DEFINE_NOARG(sfence_inval_ir); + DEFINE_SFENCE_TYPE(sinval_vma); + DEFINE_SFENCE_TYPE(hinval_vvma); + DEFINE_SFENCE_TYPE(hinval_gvma); + } + + if (isa->extension_enabled('F')) { + DEFINE_FLOAD(flw) + DEFINE_FSTORE(fsw) + DEFINE_FRTYPE(fadd_s); + DEFINE_FRTYPE(fsub_s); + DEFINE_FRTYPE(fmul_s); + DEFINE_FRTYPE(fdiv_s); + DEFINE_FR1TYPE(fsqrt_s); + DEFINE_FRTYPE(fmin_s); + DEFINE_FRTYPE(fmax_s); + DEFINE_FR3TYPE(fmadd_s); + DEFINE_FR3TYPE(fmsub_s); + DEFINE_FR3TYPE(fnmadd_s); + DEFINE_FR3TYPE(fnmsub_s); + DEFINE_FRTYPE(fsgnj_s); + DEFINE_FRTYPE(fsgnjn_s); + DEFINE_FRTYPE(fsgnjx_s); + DEFINE_FR1TYPE(fcvt_s_d); + DEFINE_FR1TYPE(fcvt_s_q); + DEFINE_XFTYPE(fcvt_s_l); + DEFINE_XFTYPE(fcvt_s_lu); + DEFINE_XFTYPE(fcvt_s_w); + DEFINE_XFTYPE(fcvt_s_wu); + DEFINE_XFTYPE(fcvt_s_wu); + DEFINE_XFTYPE(fmv_w_x); + DEFINE_FXTYPE(fcvt_l_s); + DEFINE_FXTYPE(fcvt_lu_s); + DEFINE_FXTYPE(fcvt_w_s); + DEFINE_FXTYPE(fcvt_wu_s); + DEFINE_FXTYPE(fclass_s); + DEFINE_FXTYPE(fmv_x_w); + DEFINE_FX2TYPE(feq_s); + DEFINE_FX2TYPE(flt_s); + DEFINE_FX2TYPE(fle_s); + } + + if (isa->extension_enabled(EXT_ZFINX)) { + DEFINE_RTYPE(fadd_s); + DEFINE_RTYPE(fsub_s); + DEFINE_RTYPE(fmul_s); + DEFINE_RTYPE(fdiv_s); + DEFINE_R1TYPE(fsqrt_s); + DEFINE_RTYPE(fmin_s); + DEFINE_RTYPE(fmax_s); + DEFINE_R3TYPE(fmadd_s); + DEFINE_R3TYPE(fmsub_s); + DEFINE_R3TYPE(fnmadd_s); + DEFINE_R3TYPE(fnmsub_s); + DEFINE_RTYPE(fsgnj_s); + DEFINE_RTYPE(fsgnjn_s); + DEFINE_RTYPE(fsgnjx_s); + DEFINE_R1TYPE(fcvt_s_d); + //DEFINE_R1TYPE(fcvt_s_q); + DEFINE_R1TYPE(fcvt_s_l); + DEFINE_R1TYPE(fcvt_s_lu); + DEFINE_R1TYPE(fcvt_s_w); + DEFINE_R1TYPE(fcvt_s_wu); + DEFINE_R1TYPE(fcvt_s_wu); + DEFINE_R1TYPE(fcvt_l_s); + DEFINE_R1TYPE(fcvt_lu_s); + DEFINE_R1TYPE(fcvt_w_s); + DEFINE_R1TYPE(fcvt_wu_s); + DEFINE_R1TYPE(fclass_s); + DEFINE_RTYPE(feq_s); + DEFINE_RTYPE(flt_s); + DEFINE_RTYPE(fle_s); + } + + if (isa->extension_enabled('D')) { + DEFINE_FLOAD(fld) + DEFINE_FSTORE(fsd) + DEFINE_FRTYPE(fadd_d); + DEFINE_FRTYPE(fsub_d); + DEFINE_FRTYPE(fmul_d); + DEFINE_FRTYPE(fdiv_d); + DEFINE_FR1TYPE(fsqrt_d); + DEFINE_FRTYPE(fmin_d); + DEFINE_FRTYPE(fmax_d); + DEFINE_FR3TYPE(fmadd_d); + DEFINE_FR3TYPE(fmsub_d); + DEFINE_FR3TYPE(fnmadd_d); + DEFINE_FR3TYPE(fnmsub_d); + DEFINE_FRTYPE(fsgnj_d); + DEFINE_FRTYPE(fsgnjn_d); + DEFINE_FRTYPE(fsgnjx_d); + DEFINE_FR1TYPE(fcvt_d_s); + DEFINE_FR1TYPE(fcvt_d_q); + DEFINE_XFTYPE(fcvt_d_l); + DEFINE_XFTYPE(fcvt_d_lu); + DEFINE_XFTYPE(fcvt_d_w); + DEFINE_XFTYPE(fcvt_d_wu); + DEFINE_XFTYPE(fcvt_d_wu); + DEFINE_XFTYPE(fmv_d_x); + DEFINE_FXTYPE(fcvt_l_d); + 
DEFINE_FXTYPE(fcvt_lu_d); + DEFINE_FXTYPE(fcvt_w_d); + DEFINE_FXTYPE(fcvt_wu_d); + DEFINE_FXTYPE(fclass_d); + DEFINE_FXTYPE(fmv_x_d); + DEFINE_FX2TYPE(feq_d); + DEFINE_FX2TYPE(flt_d); + DEFINE_FX2TYPE(fle_d); + } + + if (isa->extension_enabled(EXT_ZDINX)) { + DEFINE_RTYPE(fadd_d); + DEFINE_RTYPE(fsub_d); + DEFINE_RTYPE(fmul_d); + DEFINE_RTYPE(fdiv_d); + DEFINE_R1TYPE(fsqrt_d); + DEFINE_RTYPE(fmin_d); + DEFINE_RTYPE(fmax_d); + DEFINE_R3TYPE(fmadd_d); + DEFINE_R3TYPE(fmsub_d); + DEFINE_R3TYPE(fnmadd_d); + DEFINE_R3TYPE(fnmsub_d); + DEFINE_RTYPE(fsgnj_d); + DEFINE_RTYPE(fsgnjn_d); + DEFINE_RTYPE(fsgnjx_d); + DEFINE_R1TYPE(fcvt_d_s); + //DEFINE_R1TYPE(fcvt_d_q); + DEFINE_R1TYPE(fcvt_d_l); + DEFINE_R1TYPE(fcvt_d_lu); + DEFINE_R1TYPE(fcvt_d_w); + DEFINE_R1TYPE(fcvt_d_wu); + DEFINE_R1TYPE(fcvt_d_wu); + DEFINE_R1TYPE(fcvt_l_d); + DEFINE_R1TYPE(fcvt_lu_d); + DEFINE_R1TYPE(fcvt_w_d); + DEFINE_R1TYPE(fcvt_wu_d); + DEFINE_R1TYPE(fclass_d); + DEFINE_RTYPE(feq_d); + DEFINE_RTYPE(flt_d); + DEFINE_RTYPE(fle_d); + } + + if (isa->extension_enabled(EXT_ZFH)) { + DEFINE_FRTYPE(fadd_h); + DEFINE_FRTYPE(fsub_h); + DEFINE_FRTYPE(fmul_h); + DEFINE_FRTYPE(fdiv_h); + DEFINE_FR1TYPE(fsqrt_h); + DEFINE_FRTYPE(fmin_h); + DEFINE_FRTYPE(fmax_h); + DEFINE_FR3TYPE(fmadd_h); + DEFINE_FR3TYPE(fmsub_h); + DEFINE_FR3TYPE(fnmadd_h); + DEFINE_FR3TYPE(fnmsub_h); + DEFINE_FRTYPE(fsgnj_h); + DEFINE_FRTYPE(fsgnjn_h); + DEFINE_FRTYPE(fsgnjx_h); + DEFINE_XFTYPE(fcvt_h_l); + DEFINE_XFTYPE(fcvt_h_lu); + DEFINE_XFTYPE(fcvt_h_w); + DEFINE_XFTYPE(fcvt_h_wu); + DEFINE_XFTYPE(fcvt_h_wu); + DEFINE_FXTYPE(fcvt_l_h); + DEFINE_FXTYPE(fcvt_lu_h); + DEFINE_FXTYPE(fcvt_w_h); + DEFINE_FXTYPE(fcvt_wu_h); + DEFINE_FXTYPE(fclass_h); + DEFINE_FX2TYPE(feq_h); + DEFINE_FX2TYPE(flt_h); + DEFINE_FX2TYPE(fle_h); + } + + if (isa->extension_enabled(EXT_ZHINX)) { + DEFINE_RTYPE(fadd_h); + DEFINE_RTYPE(fsub_h); + DEFINE_RTYPE(fmul_h); + DEFINE_RTYPE(fdiv_h); + DEFINE_R1TYPE(fsqrt_h); + DEFINE_RTYPE(fmin_h); + DEFINE_RTYPE(fmax_h); + DEFINE_R3TYPE(fmadd_h); + DEFINE_R3TYPE(fmsub_h); + DEFINE_R3TYPE(fnmadd_h); + DEFINE_R3TYPE(fnmsub_h); + DEFINE_RTYPE(fsgnj_h); + DEFINE_RTYPE(fsgnjn_h); + DEFINE_RTYPE(fsgnjx_h); + DEFINE_R1TYPE(fcvt_h_l); + DEFINE_R1TYPE(fcvt_h_lu); + DEFINE_R1TYPE(fcvt_h_w); + DEFINE_R1TYPE(fcvt_h_wu); + DEFINE_R1TYPE(fcvt_h_wu); + DEFINE_R1TYPE(fcvt_l_h); + DEFINE_R1TYPE(fcvt_lu_h); + DEFINE_R1TYPE(fcvt_w_h); + DEFINE_R1TYPE(fcvt_wu_h); + DEFINE_R1TYPE(fclass_h); + DEFINE_RTYPE(feq_h); + DEFINE_RTYPE(flt_h); + DEFINE_RTYPE(fle_h); + } + + if (isa->extension_enabled(EXT_ZFHMIN)) { + DEFINE_FLOAD(flh) + DEFINE_FSTORE(fsh) + DEFINE_FR1TYPE(fcvt_h_s); + DEFINE_FR1TYPE(fcvt_h_d); + DEFINE_FR1TYPE(fcvt_h_q); + DEFINE_FR1TYPE(fcvt_s_h); + DEFINE_FR1TYPE(fcvt_d_h); + DEFINE_FR1TYPE(fcvt_q_h); + DEFINE_XFTYPE(fmv_h_x); + DEFINE_FXTYPE(fmv_x_h); + } + + if (isa->extension_enabled(EXT_ZHINXMIN)) { + DEFINE_R1TYPE(fcvt_h_s); + DEFINE_R1TYPE(fcvt_h_d); + //DEFINE_R1TYPE(fcvt_h_q); + DEFINE_R1TYPE(fcvt_s_h); + DEFINE_R1TYPE(fcvt_d_h); + //DEFINE_R1TYPE(fcvt_q_h); + } + + if (isa->extension_enabled('Q')) { + DEFINE_FLOAD(flq) + DEFINE_FSTORE(fsq) + DEFINE_FRTYPE(fadd_q); + DEFINE_FRTYPE(fsub_q); + DEFINE_FRTYPE(fmul_q); + DEFINE_FRTYPE(fdiv_q); + DEFINE_FR1TYPE(fsqrt_q); + DEFINE_FRTYPE(fmin_q); + DEFINE_FRTYPE(fmax_q); + DEFINE_FR3TYPE(fmadd_q); + DEFINE_FR3TYPE(fmsub_q); + DEFINE_FR3TYPE(fnmadd_q); + DEFINE_FR3TYPE(fnmsub_q); + DEFINE_FRTYPE(fsgnj_q); + DEFINE_FRTYPE(fsgnjn_q); + DEFINE_FRTYPE(fsgnjx_q); + DEFINE_FR1TYPE(fcvt_q_s); + 
DEFINE_FR1TYPE(fcvt_q_d); + DEFINE_XFTYPE(fcvt_q_l); + DEFINE_XFTYPE(fcvt_q_lu); + DEFINE_XFTYPE(fcvt_q_w); + DEFINE_XFTYPE(fcvt_q_wu); + DEFINE_XFTYPE(fcvt_q_wu); + DEFINE_FXTYPE(fcvt_l_q); + DEFINE_FXTYPE(fcvt_lu_q); + DEFINE_FXTYPE(fcvt_w_q); + DEFINE_FXTYPE(fcvt_wu_q); + DEFINE_FXTYPE(fclass_q); + DEFINE_FX2TYPE(feq_q); + DEFINE_FX2TYPE(flt_q); + DEFINE_FX2TYPE(fle_q); + } + + // ext-h + if (isa->extension_enabled('H')) { + DEFINE_XLOAD_BASE(hlv_b) + DEFINE_XLOAD_BASE(hlv_bu) + DEFINE_XLOAD_BASE(hlv_h) + DEFINE_XLOAD_BASE(hlv_hu) + DEFINE_XLOAD_BASE(hlv_w) + DEFINE_XLOAD_BASE(hlv_wu) + DEFINE_XLOAD_BASE(hlv_d) + + DEFINE_XLOAD_BASE(hlvx_hu) + DEFINE_XLOAD_BASE(hlvx_wu) + + DEFINE_XSTORE_BASE(hsv_b) + DEFINE_XSTORE_BASE(hsv_h) + DEFINE_XSTORE_BASE(hsv_w) + DEFINE_XSTORE_BASE(hsv_d) + + DEFINE_SFENCE_TYPE(hfence_gvma); + DEFINE_SFENCE_TYPE(hfence_vvma); + } + + // ext-c + if (isa->extension_enabled('C')) { + DISASM_INSN("c.ebreak", c_add, mask_rd | mask_rvc_rs2, {}); + add_insn(new disasm_insn_t("ret", match_c_jr | match_rd_ra, mask_c_jr | mask_rd | mask_rvc_imm, {})); + DISASM_INSN("c.jr", c_jr, mask_rvc_imm, {&rvc_rs1}); + DISASM_INSN("c.jalr", c_jalr, mask_rvc_imm, {&rvc_rs1}); + DISASM_INSN("c.nop", c_addi, mask_rd | mask_rvc_imm, {}); + DISASM_INSN("c.addi16sp", c_addi16sp, mask_rd, {&rvc_sp, &rvc_addi16sp_imm}); + DISASM_INSN("c.addi4spn", c_addi4spn, 0, {&rvc_rs2s, &rvc_sp, &rvc_addi4spn_imm}); + DISASM_INSN("c.li", c_li, 0, {&xrd, &rvc_imm}); + DISASM_INSN("c.lui", c_lui, 0, {&xrd, &rvc_uimm}); + DISASM_INSN("c.addi", c_addi, 0, {&xrd, &rvc_imm}); + DISASM_INSN("c.slli", c_slli, 0, {&rvc_rs1, &rvc_shamt}); + DISASM_INSN("c.srli", c_srli, 0, {&rvc_rs1s, &rvc_shamt}); + DISASM_INSN("c.srai", c_srai, 0, {&rvc_rs1s, &rvc_shamt}); + DISASM_INSN("c.andi", c_andi, 0, {&rvc_rs1s, &rvc_imm}); + DISASM_INSN("c.mv", c_mv, 0, {&xrd, &rvc_rs2}); + DISASM_INSN("c.add", c_add, 0, {&xrd, &rvc_rs2}); + DISASM_INSN("c.addw", c_addw, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.sub", c_sub, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.subw", c_subw, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.and", c_and, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.or", c_or, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.xor", c_xor, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.lwsp", c_lwsp, 0, {&xrd, &rvc_lwsp_address}); + DISASM_INSN("c.fld", c_fld, 0, {&rvc_fp_rs2s, &rvc_ld_address}); + DISASM_INSN("c.swsp", c_swsp, 0, {&rvc_rs2, &rvc_swsp_address}); + DISASM_INSN("c.lw", c_lw, 0, {&rvc_rs2s, &rvc_lw_address}); + DISASM_INSN("c.sw", c_sw, 0, {&rvc_rs2s, &rvc_lw_address}); + DISASM_INSN("c.beqz", c_beqz, 0, {&rvc_rs1s, &rvc_branch_target}); + DISASM_INSN("c.bnez", c_bnez, 0, {&rvc_rs1s, &rvc_branch_target}); + DISASM_INSN("c.j", c_j, 0, {&rvc_jump_target}); + DISASM_INSN("c.fldsp", c_fldsp, 0, {&frd, &rvc_ldsp_address}); + DISASM_INSN("c.fsd", c_fsd, 0, {&rvc_fp_rs2s, &rvc_ld_address}); + DISASM_INSN("c.fsdsp", c_fsdsp, 0, {&rvc_fp_rs2, &rvc_sdsp_address}); + if (isa->get_max_xlen() == 32) { + DISASM_INSN("c.flw", c_flw, 0, {&rvc_fp_rs2s, &rvc_lw_address}); + DISASM_INSN("c.flwsp", c_flwsp, 0, {&frd, &rvc_lwsp_address}); + DISASM_INSN("c.fsw", c_fsw, 0, {&rvc_fp_rs2s, &rvc_lw_address}); + DISASM_INSN("c.fswsp", c_fswsp, 0, {&rvc_fp_rs2, &rvc_swsp_address}); + DISASM_INSN("c.jal", c_jal, 0, {&rvc_jump_target}); + } else { + DISASM_INSN("c.ld", c_ld, 0, {&rvc_rs2s, &rvc_ld_address}); + DISASM_INSN("c.ldsp", c_ldsp, 0, {&xrd, &rvc_ldsp_address}); + DISASM_INSN("c.sd", c_sd, 0, {&rvc_rs2s, &rvc_ld_address}); + 
DISASM_INSN("c.sdsp", c_sdsp, 0, {&rvc_rs2, &rvc_sdsp_address}); + DISASM_INSN("c.addiw", c_addiw, 0, {&xrd, &rvc_imm}); + } + } + + if (isa->extension_enabled('V')) { + DISASM_INSN("vsetivli", vsetivli, 0, {&xrd, &zimm5, &v_vtype}); + DISASM_INSN("vsetvli", vsetvli, 0, {&xrd, &xrs1, &v_vtype}); + DEFINE_RTYPE(vsetvl); + + std::vector v_ld_unit = {&vd, &v_address, opt, &vm}; + std::vector v_st_unit = {&vs3, &v_address, opt, &vm}; + std::vector v_ld_stride = {&vd, &v_address, &xrs2, opt, &vm}; + std::vector v_st_stride = {&vs3, &v_address, &xrs2, opt, &vm}; + std::vector v_ld_index = {&vd, &v_address, &vs2, opt, &vm}; + std::vector v_st_index = {&vs3, &v_address, &vs2, opt, &vm}; + + add_insn(new disasm_insn_t("vlm.v", match_vlm_v, mask_vlm_v, v_ld_unit)); + add_insn(new disasm_insn_t("vsm.v", match_vsm_v, mask_vsm_v, v_st_unit)); + + // handle vector segment load/store + for (size_t elt = 0; elt <= 7; ++elt) { + const custom_fmt_t template_insn[] = { + {match_vle8_v, mask_vle8_v, "vl%se%d.v", v_ld_unit}, + {match_vse8_v, mask_vse8_v, "vs%se%d.v", v_st_unit}, + + {match_vluxei8_v, mask_vluxei8_v, "vlux%sei%d.v", v_ld_index}, + {match_vsuxei8_v, mask_vsuxei8_v, "vsux%sei%d.v", v_st_index}, + + {match_vlse8_v, mask_vlse8_v, "vls%se%d.v", v_ld_stride}, + {match_vsse8_v, mask_vsse8_v, "vss%se%d.v", v_st_stride}, + + {match_vloxei8_v, mask_vloxei8_v, "vlox%sei%d.v", v_ld_index}, + {match_vsoxei8_v, mask_vsoxei8_v, "vsox%sei%d.v", v_st_index}, + + {match_vle8ff_v, mask_vle8ff_v, "vl%se%dff.v", v_ld_unit} + }; + + reg_t elt_map[] = {0x00000000, 0x00005000, 0x00006000, 0x00007000, + 0x10000000, 0x10005000, 0x10006000, 0x10007000}; + + for (unsigned nf = 0; nf <= 7; ++nf) { + char seg_str[8] = ""; + if (nf) + sprintf(seg_str, "seg%u", nf + 1); + + for (auto item : template_insn) { + const reg_t match_nf = nf << 29; + char buf[128]; + sprintf(buf, item.fmt, seg_str, 8 << elt); + add_insn(new disasm_insn_t( + buf, + ((item.match | match_nf) & ~mask_vldst) | elt_map[elt], + item.mask | mask_nf, + item.arg + )); + } + } + + const custom_fmt_t template_insn2[] = { + {match_vl1re8_v, mask_vl1re8_v, "vl%dre%d.v", v_ld_unit}, + }; + + for (reg_t i = 0, nf = 7; i < 4; i++, nf >>= 1) { + for (auto item : template_insn2) { + const reg_t match_nf = nf << 29; + char buf[128]; + sprintf(buf, item.fmt, nf + 1, 8 << elt); + add_insn(new disasm_insn_t( + buf, + item.match | match_nf | elt_map[elt], + item.mask | mask_nf, + item.arg + )); + } + } + } + + #define DISASM_ST_WHOLE_INSN(name, nf) \ + add_insn(new disasm_insn_t(#name, match_vs1r_v | (nf << 29), \ + mask_vs1r_v | mask_nf, \ + {&vs3, &v_address})); + DISASM_ST_WHOLE_INSN(vs1r.v, 0); + DISASM_ST_WHOLE_INSN(vs2r.v, 1); + DISASM_ST_WHOLE_INSN(vs4r.v, 3); + DISASM_ST_WHOLE_INSN(vs8r.v, 7); + + #undef DISASM_ST_WHOLE_INSN + + #define DEFINE_VECTOR_V(code) add_vector_v_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VV(code) add_vector_vv_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VX(code) add_vector_vx_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VF(code) add_vector_vf_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VI(code) add_vector_vi_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VIU(code) add_vector_viu_insn(this, #code, match_##code, mask_##code) + + #define DISASM_OPIV_VXI_INSN(name, sign, suf) \ + DEFINE_VECTOR_VV(name##_##suf##v); \ + DEFINE_VECTOR_VX(name##_##suf##x); \ + if (sign) \ + DEFINE_VECTOR_VI(name##_##suf##i); \ + else \ + 
DEFINE_VECTOR_VIU(name##_##suf##i) + + #define DISASM_OPIV_VX__INSN(name, sign) \ + DEFINE_VECTOR_VV(name##_vv); \ + DEFINE_VECTOR_VX(name##_vx) + + #define DISASM_OPIV__XI_INSN(name, sign) \ + DEFINE_VECTOR_VX(name##_vx); \ + if (sign) \ + DEFINE_VECTOR_VI(name##_vi); \ + else \ + DEFINE_VECTOR_VIU(name##_vi) + + #define DISASM_OPIV_V___INSN(name, sign) DEFINE_VECTOR_VV(name##_vv) + + #define DISASM_OPIV_S___INSN(name, sign) DEFINE_VECTOR_VV(name##_vs) + + #define DISASM_OPIV_W___INSN(name, sign) \ + DEFINE_VECTOR_VV(name##_wv); \ + DEFINE_VECTOR_VX(name##_wx) + + #define DISASM_OPIV_M___INSN(name, sign) DEFINE_VECTOR_VV(name##_mm) + + #define DISASM_OPIV__X__INSN(name, sign) DEFINE_VECTOR_VX(name##_vx) + + #define DEFINE_VECTOR_VVM(name) \ + add_vector_vvm_insn(this, #name, match_##name, mask_##name | mask_vm) + + #define DEFINE_VECTOR_VXM(name) \ + add_vector_vxm_insn(this, #name, match_##name, mask_##name | mask_vm) + + #define DEFINE_VECTOR_VIM(name) \ + add_vector_vim_insn(this, #name, match_##name, mask_##name | mask_vm) + + #define DISASM_OPIV_VXIM_INSN(name) \ + DEFINE_VECTOR_VVM(name##_vvm); \ + DEFINE_VECTOR_VXM(name##_vxm); \ + DEFINE_VECTOR_VIM(name##_vim) + + #define DISASM_OPIV_VX_M_INSN(name) \ + DEFINE_VECTOR_VVM(name##_vvm); \ + DEFINE_VECTOR_VXM(name##_vxm) + + //OPFVV/OPFVF + //0b00_0000 + DISASM_OPIV_VXI_INSN(vadd, 1, v); + DISASM_OPIV_VX__INSN(vsub, 1); + DISASM_OPIV__XI_INSN(vrsub, 1); + DISASM_OPIV_VX__INSN(vminu, 0); + DISASM_OPIV_VX__INSN(vmin, 1); + DISASM_OPIV_VX__INSN(vmaxu, 1); + DISASM_OPIV_VX__INSN(vmax, 0); + DISASM_OPIV_VXI_INSN(vand, 1, v); + DISASM_OPIV_VXI_INSN(vor, 1, v); + DISASM_OPIV_VXI_INSN(vxor, 1, v); + DISASM_OPIV_VXI_INSN(vrgather, 0, v); + DISASM_OPIV_V___INSN(vrgatherei16, 0); + DISASM_OPIV__XI_INSN(vslideup, 0); + DISASM_OPIV__XI_INSN(vslidedown, 0); + + //0b01_0000 + DISASM_OPIV_VXIM_INSN(vadc); + DISASM_OPIV_VX_M_INSN(vsbc); + DISASM_OPIV_VXIM_INSN(vmadc); + DISASM_OPIV_VXI_INSN(vmadc, 1, v); + DISASM_OPIV_VX_M_INSN(vmsbc); + DISASM_OPIV_VX__INSN(vmsbc, 1); + DISASM_OPIV_VXIM_INSN(vmerge); + DISASM_INSN("vmv.v.i", vmv_v_i, 0, {&vd, &v_simm5}); + DISASM_INSN("vmv.v.v", vmv_v_v, 0, {&vd, &vs1}); + DISASM_INSN("vmv.v.x", vmv_v_x, 0, {&vd, &xrs1}); + DISASM_OPIV_VXI_INSN(vmseq, 1, v); + DISASM_OPIV_VXI_INSN(vmsne, 1, v); + DISASM_OPIV_VX__INSN(vmsltu, 0); + DISASM_OPIV_VX__INSN(vmslt, 1); + DISASM_OPIV_VXI_INSN(vmsleu, 0, v); + DISASM_OPIV_VXI_INSN(vmsle, 1, v); + DISASM_OPIV__XI_INSN(vmsgtu, 0); + DISASM_OPIV__XI_INSN(vmsgt, 1); + + //0b10_0000 + DISASM_OPIV_VXI_INSN(vsaddu, 0, v); + DISASM_OPIV_VXI_INSN(vsadd, 1, v); + DISASM_OPIV_VX__INSN(vssubu, 0); + DISASM_OPIV_VX__INSN(vssub, 1); + DISASM_OPIV_VXI_INSN(vsll, 1, v); + DISASM_INSN("vmv1r.v", vmv1r_v, 0, {&vd, &vs2}); + DISASM_INSN("vmv2r.v", vmv2r_v, 0, {&vd, &vs2}); + DISASM_INSN("vmv4r.v", vmv4r_v, 0, {&vd, &vs2}); + DISASM_INSN("vmv8r.v", vmv8r_v, 0, {&vd, &vs2}); + DISASM_OPIV_VX__INSN(vsmul, 1); + DISASM_OPIV_VXI_INSN(vsrl, 0, v); + DISASM_OPIV_VXI_INSN(vsra, 0, v); + DISASM_OPIV_VXI_INSN(vssrl, 0, v); + DISASM_OPIV_VXI_INSN(vssra, 0, v); + DISASM_OPIV_VXI_INSN(vnsrl, 0, w); + DISASM_OPIV_VXI_INSN(vnsra, 0, w); + DISASM_OPIV_VXI_INSN(vnclipu, 0, w); + DISASM_OPIV_VXI_INSN(vnclip, 0, w); + + //0b11_0000 + DISASM_OPIV_S___INSN(vwredsumu, 0); + DISASM_OPIV_S___INSN(vwredsum, 1); + + //OPMVV/OPMVX + //0b00_0000 + DISASM_OPIV_VX__INSN(vaaddu, 0); + DISASM_OPIV_VX__INSN(vaadd, 0); + DISASM_OPIV_VX__INSN(vasubu, 0); + DISASM_OPIV_VX__INSN(vasub, 0); + + DISASM_OPIV_S___INSN(vredsum, 1); + 
DISASM_OPIV_S___INSN(vredand, 1); + DISASM_OPIV_S___INSN(vredor, 1); + DISASM_OPIV_S___INSN(vredxor, 1); + DISASM_OPIV_S___INSN(vredminu, 0); + DISASM_OPIV_S___INSN(vredmin, 1); + DISASM_OPIV_S___INSN(vredmaxu, 0); + DISASM_OPIV_S___INSN(vredmax, 1); + DISASM_OPIV__X__INSN(vslide1up, 1); + DISASM_OPIV__X__INSN(vslide1down,1); + + //0b01_0000 + //VWXUNARY0 + DISASM_INSN("vmv.x.s", vmv_x_s, 0, {&xrd, &vs2}); + DISASM_INSN("vcpop.m", vcpop_m, 0, {&xrd, &vs2, opt, &vm}); + DISASM_INSN("vfirst.m", vfirst_m, 0, {&xrd, &vs2, opt, &vm}); + + //VRXUNARY0 + DISASM_INSN("vmv.s.x", vmv_s_x, 0, {&vd, &xrs1}); + + //VXUNARY0 + DEFINE_VECTOR_V(vzext_vf2); + DEFINE_VECTOR_V(vsext_vf2); + DEFINE_VECTOR_V(vzext_vf4); + DEFINE_VECTOR_V(vsext_vf4); + DEFINE_VECTOR_V(vzext_vf8); + DEFINE_VECTOR_V(vsext_vf8); + + //VMUNARY0 + DEFINE_VECTOR_V(vmsbf_m); + DEFINE_VECTOR_V(vmsof_m); + DEFINE_VECTOR_V(vmsif_m); + DEFINE_VECTOR_V(viota_m); + DISASM_INSN("vid.v", vid_v, 0, {&vd, opt, &vm}); + + DISASM_INSN("vcompress.vm", vcompress_vm, 0, {&vd, &vs2, &vs1}); + + DISASM_OPIV_M___INSN(vmandn, 1); + DISASM_OPIV_M___INSN(vmand, 1); + DISASM_OPIV_M___INSN(vmor, 1); + DISASM_OPIV_M___INSN(vmxor, 1); + DISASM_OPIV_M___INSN(vmorn, 1); + DISASM_OPIV_M___INSN(vmnand, 1); + DISASM_OPIV_M___INSN(vmnor, 1); + DISASM_OPIV_M___INSN(vmxnor, 1); + + //0b10_0000 + DISASM_OPIV_VX__INSN(vdivu, 0); + DISASM_OPIV_VX__INSN(vdiv, 1); + DISASM_OPIV_VX__INSN(vremu, 0); + DISASM_OPIV_VX__INSN(vrem, 1); + DISASM_OPIV_VX__INSN(vmulhu, 0); + DISASM_OPIV_VX__INSN(vmul, 1); + DISASM_OPIV_VX__INSN(vmulhsu, 0); + DISASM_OPIV_VX__INSN(vmulh, 1); + DISASM_OPIV_VX__INSN(vmadd, 1); + DISASM_OPIV_VX__INSN(vnmsub, 1); + DISASM_OPIV_VX__INSN(vmacc, 1); + DISASM_OPIV_VX__INSN(vnmsac, 1); + + //0b11_0000 + DISASM_OPIV_VX__INSN(vwaddu, 0); + DISASM_OPIV_VX__INSN(vwadd, 1); + DISASM_OPIV_VX__INSN(vwsubu, 0); + DISASM_OPIV_VX__INSN(vwsub, 1); + DISASM_OPIV_W___INSN(vwaddu, 0); + DISASM_OPIV_W___INSN(vwadd, 1); + DISASM_OPIV_W___INSN(vwsubu, 0); + DISASM_OPIV_W___INSN(vwsub, 1); + DISASM_OPIV_VX__INSN(vwmulu, 0); + DISASM_OPIV_VX__INSN(vwmulsu, 0); + DISASM_OPIV_VX__INSN(vwmul, 1); + DISASM_OPIV_VX__INSN(vwmaccu, 0); + DISASM_OPIV_VX__INSN(vwmacc, 1); + DISASM_OPIV__X__INSN(vwmaccus, 1); + DISASM_OPIV_VX__INSN(vwmaccsu, 0); + + #undef DISASM_OPIV_VXI_INSN + #undef DISASM_OPIV_VX__INSN + #undef DISASM_OPIV__XI_INSN + #undef DISASM_OPIV_V___INSN + #undef DISASM_OPIV_S___INSN + #undef DISASM_OPIV_W___INSN + #undef DISASM_OPIV_M___INSN + #undef DISASM_OPIV__X__INSN + #undef DISASM_OPIV_VXIM_INSN + #undef DISASM_OPIV_VX_M_INSN + + #define DISASM_OPIV_VF_INSN(name) \ + DEFINE_VECTOR_VV(name##_vv); \ + DEFINE_VECTOR_VF(name##_vf) + + #define DISASM_OPIV_WF_INSN(name) \ + DEFINE_VECTOR_VV(name##_wv); \ + DEFINE_VECTOR_VF(name##_wf) + + #define DISASM_OPIV_S__INSN(name) \ + DEFINE_VECTOR_VV(name##_vs) + + #define DISASM_OPIV__F_INSN(name) \ + DEFINE_VECTOR_VF(name##_vf) + + #define DISASM_VFUNARY0_INSN(name, suf) \ + DEFINE_VECTOR_V(name##cvt_rtz_xu_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_rtz_x_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_xu_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_x_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_f_xu_##suf); \ + DEFINE_VECTOR_V(name##cvt_f_x_##suf) + + //OPFVV/OPFVF + //0b00_0000 + DISASM_OPIV_VF_INSN(vfadd); + DISASM_OPIV_S__INSN(vfredusum); + DISASM_OPIV_VF_INSN(vfsub); + DISASM_OPIV_S__INSN(vfredosum); + DISASM_OPIV_VF_INSN(vfmin); + DISASM_OPIV_S__INSN(vfredmin); + DISASM_OPIV_VF_INSN(vfmax); +
DISASM_OPIV_S__INSN(vfredmax); + DISASM_OPIV_VF_INSN(vfsgnj); + DISASM_OPIV_VF_INSN(vfsgnjn); + DISASM_OPIV_VF_INSN(vfsgnjx); + DISASM_INSN("vfmv.f.s", vfmv_f_s, 0, {&frd, &vs2}); + DISASM_INSN("vfmv.s.f", vfmv_s_f, mask_vfmv_s_f, {&vd, &frs1}); + DISASM_OPIV__F_INSN(vfslide1up); + DISASM_OPIV__F_INSN(vfslide1down); + + //0b01_0000 + DISASM_INSN("vfmerge.vfm", vfmerge_vfm, 0, {&vd, &vs2, &frs1, &v0}); + DISASM_INSN("vfmv.v.f", vfmv_v_f, 0, {&vd, &frs1}); + DISASM_OPIV_VF_INSN(vmfeq); + DISASM_OPIV_VF_INSN(vmfle); + DISASM_OPIV_VF_INSN(vmflt); + DISASM_OPIV_VF_INSN(vmfne); + DISASM_OPIV__F_INSN(vmfgt); + DISASM_OPIV__F_INSN(vmfge); + + //0b10_0000 + DISASM_OPIV_VF_INSN(vfdiv); + DISASM_OPIV__F_INSN(vfrdiv); + + //vfunary0 + DISASM_VFUNARY0_INSN(vf, v); + DISASM_VFUNARY0_INSN(vfw, v); + DEFINE_VECTOR_V(vfwcvt_f_f_v); + + DISASM_VFUNARY0_INSN(vfn, w); + DEFINE_VECTOR_V(vfncvt_f_f_w); + DEFINE_VECTOR_V(vfncvt_rod_f_f_w); + + //vfunary1 + DEFINE_VECTOR_V(vfsqrt_v); + DEFINE_VECTOR_V(vfrsqrt7_v); + DEFINE_VECTOR_V(vfrec7_v); + DEFINE_VECTOR_V(vfclass_v); + + DISASM_OPIV_VF_INSN(vfmul); + DISASM_OPIV__F_INSN(vfrsub); + DISASM_OPIV_VF_INSN(vfmadd); + DISASM_OPIV_VF_INSN(vfnmadd); + DISASM_OPIV_VF_INSN(vfmsub); + DISASM_OPIV_VF_INSN(vfnmsub); + DISASM_OPIV_VF_INSN(vfmacc); + DISASM_OPIV_VF_INSN(vfnmacc); + DISASM_OPIV_VF_INSN(vfmsac); + DISASM_OPIV_VF_INSN(vfnmsac); + + //0b11_0000 + DISASM_OPIV_VF_INSN(vfwadd); + DISASM_OPIV_S__INSN(vfwredusum); + DISASM_OPIV_VF_INSN(vfwsub); + DISASM_OPIV_S__INSN(vfwredosum); + DISASM_OPIV_WF_INSN(vfwadd); + DISASM_OPIV_WF_INSN(vfwsub); + DISASM_OPIV_VF_INSN(vfwmul); + DISASM_OPIV_VF_INSN(vfwmacc); + DISASM_OPIV_VF_INSN(vfwnmacc); + DISASM_OPIV_VF_INSN(vfwmsac); + DISASM_OPIV_VF_INSN(vfwnmsac); + + #undef DISASM_OPIV_VF_INSN + #undef DISASM_OPIV__F_INSN + #undef DISASM_OPIV_S__INSN + #undef DISASM_OPIV_WF_INSN + #undef DISASM_VFUNARY0_INSN + + // vector amo + std::vector<const arg_t*> v_fmt_amo_wd = {&vd, &v_address, &vs2, &vd, opt, &vm}; + std::vector<const arg_t*> v_fmt_amo = {&x0, &v_address, &vs2, &vd, opt, &vm}; + for (size_t elt = 0; elt <= 3; ++elt) { + const custom_fmt_t template_insn[] = { + {match_vamoaddei8_v | mask_wd, mask_vamoaddei8_v | mask_wd, + "%sei%d.v", v_fmt_amo_wd}, + {match_vamoaddei8_v, mask_vamoaddei8_v | mask_wd, + "%sei%d.v", v_fmt_amo}, + }; + std::pair<const char*, reg_t> amo_map[] = { + {"vamoswap", 0x01ul << 27}, + {"vamoadd", 0x00ul << 27}, + {"vamoxor", 0x04ul << 27}, + {"vamoand", 0x0cul << 27}, + {"vamoor", 0x08ul << 27}, + {"vamomin", 0x10ul << 27}, + {"vamomax", 0x14ul << 27}, + {"vamominu", 0x18ul << 27}, + {"vamomaxu", 0x1cul << 27}}; + const reg_t elt_map[] = {0x0ul << 12, 0x5ul << 12, + 0x6ul << 12, 0x7ul << 12}; + + for (size_t idx = 0; idx < sizeof(amo_map) / sizeof(amo_map[0]); ++idx) { + for (auto item : template_insn) { + char buf[128]; + sprintf(buf, item.fmt, amo_map[idx].first, 8 << elt); + add_insn(new disasm_insn_t(buf, + item.match | amo_map[idx].second | elt_map[elt], + item.mask, + item.arg)); + } + } + } + } + +#define DEFINE_PI3TYPE(code) add_pitype3_insn(this, #code, match_##code, mask_##code); +#define DEFINE_PI4TYPE(code) add_pitype4_insn(this, #code, match_##code, mask_##code); +#define DEFINE_PI5TYPE(code) add_pitype5_insn(this, #code, match_##code, mask_##code); +#define DEFINE_PI6TYPE(code) add_pitype6_insn(this, #code, match_##code, mask_##code); + +#define DISASM_8_AND_16_RINSN(code) \ + DEFINE_RTYPE(code##8); \ + DEFINE_RTYPE(code##16); + +#define DISASM_8_AND_16_RINSN_ROUND(code) \ + DEFINE_RTYPE(code##8_u); \ + DEFINE_RTYPE(code##16_u); +
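+// For reference, a call such as DISASM_8_AND_16_RINSN(add) expands to
+//   DEFINE_RTYPE(add8); DEFINE_RTYPE(add16);
+// which in turn becomes
+//   add_rtype_insn(this, "add8", match_add8, mask_add8);
+//   add_rtype_insn(this, "add16", match_add16, mask_add16);
+// so each packed-SIMD op registers an (rd, rs1, rs2) disassembly entry for
+// both its 8-bit and 16-bit element variants.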
+#define DISASM_8_AND_16_PIINSN(code) \ + DEFINE_PI3TYPE(code##8); \ + DEFINE_PI4TYPE(code##16); + +#define DISASM_8_AND_16_PIINSN_ROUND(code) \ + DEFINE_PI3TYPE(code##8_u); \ + DEFINE_PI4TYPE(code##16_u); + +#define DISASM_RINSN_AND_ROUND(code) \ + DEFINE_RTYPE(code); \ + DEFINE_RTYPE(code##_u); \ + + if (isa->extension_enabled(EXT_ZMMUL)) { + DEFINE_RTYPE(mul); + DEFINE_RTYPE(mulh); + DEFINE_RTYPE(mulhu); + DEFINE_RTYPE(mulhsu); + DEFINE_RTYPE(mulw); + } + + if (isa->extension_enabled(EXT_ZBPBO)) { + DEFINE_RTYPE(min); + DEFINE_RTYPE(max); + DEFINE_R3TYPE(cmix); + DEFINE_RTYPE(pack); + DEFINE_RTYPE(packu); + add_insn(new disasm_insn_t("rev", match_grevi | ((isa->get_max_xlen() - 1) << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + add_insn(new disasm_insn_t("rev8.h", match_grevi | (0x8 << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); // swap16 + if (isa->get_max_xlen() == 32) { + DEFINE_R1TYPE(clz); + DEFINE_R3TYPE(fsr); + DEFINE_R3TYPE(fsri); + } else { + DEFINE_R3TYPE(fsrw); + } + } + + if (isa->extension_enabled(EXT_ZPSFOPERAND)) { + DEFINE_RTYPE(smal) + DEFINE_RTYPE(radd64); + DEFINE_RTYPE(uradd64); + DEFINE_RTYPE(kadd64); + DEFINE_RTYPE(ukadd64); + DEFINE_RTYPE(rsub64); + DEFINE_RTYPE(ursub64); + DEFINE_RTYPE(ksub64); + DEFINE_RTYPE(uksub64); + DEFINE_RTYPE(smar64); + DEFINE_RTYPE(smsr64); + DEFINE_RTYPE(umar64); + DEFINE_RTYPE(umsr64); + DEFINE_RTYPE(kmar64); + DEFINE_RTYPE(kmsr64); + DEFINE_RTYPE(ukmar64); + DEFINE_RTYPE(ukmsr64); + DEFINE_RTYPE(smalbb); + DEFINE_RTYPE(smalbt); + DEFINE_RTYPE(smaltt); + DEFINE_RTYPE(smalda); + DEFINE_RTYPE(smalxda); + DEFINE_RTYPE(smalds); + DEFINE_RTYPE(smaldrs); + DEFINE_RTYPE(smalxds); + DEFINE_RTYPE(smslda); + DEFINE_RTYPE(smslxda); + DEFINE_RTYPE(mulr64); + DEFINE_RTYPE(mulsr64); + if (isa->get_max_xlen() == 32) { + DEFINE_RTYPE(add64); + DEFINE_RTYPE(sub64); + } + } + + if (isa->extension_enabled(EXT_ZPN)) { + DISASM_8_AND_16_RINSN(add); + DISASM_8_AND_16_RINSN(radd); + DISASM_8_AND_16_RINSN(uradd); + DISASM_8_AND_16_RINSN(kadd); + DISASM_8_AND_16_RINSN(ukadd); + DISASM_8_AND_16_RINSN(sub); + DISASM_8_AND_16_RINSN(rsub); + DISASM_8_AND_16_RINSN(ursub); + DISASM_8_AND_16_RINSN(ksub); + DISASM_8_AND_16_RINSN(uksub); + DEFINE_RTYPE(cras16); + DEFINE_RTYPE(rcras16); + DEFINE_RTYPE(urcras16); + DEFINE_RTYPE(kcras16); + DEFINE_RTYPE(ukcras16); + DEFINE_RTYPE(crsa16); + DEFINE_RTYPE(rcrsa16); + DEFINE_RTYPE(urcrsa16); + DEFINE_RTYPE(kcrsa16); + DEFINE_RTYPE(ukcrsa16); + DEFINE_RTYPE(stas16); + DEFINE_RTYPE(rstas16); + DEFINE_RTYPE(urstas16); + DEFINE_RTYPE(kstas16); + DEFINE_RTYPE(ukstas16); + DEFINE_RTYPE(stsa16); + DEFINE_RTYPE(rstsa16); + DEFINE_RTYPE(urstsa16); + DEFINE_RTYPE(kstsa16); + DEFINE_RTYPE(ukstsa16); + DISASM_8_AND_16_RINSN(sra); + DISASM_8_AND_16_RINSN(srl); + DISASM_8_AND_16_RINSN(sll); + DISASM_8_AND_16_RINSN(ksll); + DISASM_8_AND_16_RINSN(kslra); + DISASM_8_AND_16_PIINSN(srai); + DISASM_8_AND_16_PIINSN(srli); + DISASM_8_AND_16_PIINSN(slli); + DISASM_8_AND_16_PIINSN(kslli); + DISASM_8_AND_16_RINSN_ROUND(sra); + DISASM_8_AND_16_RINSN_ROUND(srl); + DISASM_8_AND_16_RINSN_ROUND(kslra); + DISASM_8_AND_16_PIINSN_ROUND(srai); + DISASM_8_AND_16_PIINSN_ROUND(srli); + + DISASM_8_AND_16_RINSN(cmpeq); + DISASM_8_AND_16_RINSN(scmplt); + DISASM_8_AND_16_RINSN(scmple); + DISASM_8_AND_16_RINSN(ucmplt); + DISASM_8_AND_16_RINSN(ucmple); + + DISASM_8_AND_16_RINSN(smul); + DISASM_8_AND_16_RINSN(smulx); + DISASM_8_AND_16_RINSN(umul); + DISASM_8_AND_16_RINSN(umulx); + DISASM_8_AND_16_RINSN(khm); + DISASM_8_AND_16_RINSN(khmx); + + 
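+ // packed 8/16-bit min/max, saturating clip, absolute value/bit-count,
+ // and unpack/pack helpers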
DISASM_8_AND_16_RINSN(smin); + DISASM_8_AND_16_RINSN(umin); + DISASM_8_AND_16_RINSN(smax); + DISASM_8_AND_16_RINSN(umax); + DISASM_8_AND_16_PIINSN(sclip); + DISASM_8_AND_16_PIINSN(uclip); + DEFINE_R1TYPE(kabs16); + DEFINE_R1TYPE(clrs16); + DEFINE_R1TYPE(clz16); + DEFINE_R1TYPE(kabs8); + DEFINE_R1TYPE(clrs8); + DEFINE_R1TYPE(clz8); + + DEFINE_R1TYPE(sunpkd810); + DEFINE_R1TYPE(sunpkd820); + DEFINE_R1TYPE(sunpkd830); + DEFINE_R1TYPE(sunpkd831); + DEFINE_R1TYPE(sunpkd832); + DEFINE_R1TYPE(zunpkd810); + DEFINE_R1TYPE(zunpkd820); + DEFINE_R1TYPE(zunpkd830); + DEFINE_R1TYPE(zunpkd831); + DEFINE_R1TYPE(zunpkd832); + + DEFINE_RTYPE(pkbb16); + DEFINE_RTYPE(pkbt16); + DEFINE_RTYPE(pktb16); + DEFINE_RTYPE(pktt16); + DISASM_RINSN_AND_ROUND(smmul); + DISASM_RINSN_AND_ROUND(kmmac); + DISASM_RINSN_AND_ROUND(kmmsb); + DISASM_RINSN_AND_ROUND(kwmmul); + DISASM_RINSN_AND_ROUND(smmwb); + DISASM_RINSN_AND_ROUND(smmwt); + DISASM_RINSN_AND_ROUND(kmmawb); + DISASM_RINSN_AND_ROUND(kmmawt); + DISASM_RINSN_AND_ROUND(kmmwb2); + DISASM_RINSN_AND_ROUND(kmmwt2); + DISASM_RINSN_AND_ROUND(kmmawb2); + DISASM_RINSN_AND_ROUND(kmmawt2); + DEFINE_RTYPE(smbb16) + DEFINE_RTYPE(smbt16) + DEFINE_RTYPE(smtt16) + DEFINE_RTYPE(kmda) + DEFINE_RTYPE(kmxda) + DEFINE_RTYPE(smds) + DEFINE_RTYPE(smdrs) + DEFINE_RTYPE(smxds) + DEFINE_RTYPE(kmabb) + DEFINE_RTYPE(kmabt) + DEFINE_RTYPE(kmatt) + DEFINE_RTYPE(kmada) + DEFINE_RTYPE(kmaxda) + DEFINE_RTYPE(kmads) + DEFINE_RTYPE(kmadrs) + DEFINE_RTYPE(kmaxds) + DEFINE_RTYPE(kmsda) + DEFINE_RTYPE(kmsxda) + DEFINE_RTYPE(sclip32) + DEFINE_RTYPE(uclip32) + DEFINE_R1TYPE(clrs32); + DEFINE_R1TYPE(clz32); + DEFINE_RTYPE(pbsad); + DEFINE_RTYPE(pbsada); + DEFINE_RTYPE(smaqa); + DEFINE_RTYPE(umaqa); + DEFINE_RTYPE(smaqa_su); + + DEFINE_RTYPE(kaddh); + DEFINE_RTYPE(ksubh); + DEFINE_RTYPE(khmbb); + DEFINE_RTYPE(khmbt); + DEFINE_RTYPE(khmtt); + DEFINE_RTYPE(ukaddh); + DEFINE_RTYPE(uksubh); + DEFINE_RTYPE(kaddw); + DEFINE_RTYPE(ukaddw); + DEFINE_RTYPE(ksubw); + DEFINE_RTYPE(uksubw); + DEFINE_RTYPE(kdmbb); + DEFINE_RTYPE(kdmbt); + DEFINE_RTYPE(kdmtt); + DEFINE_RTYPE(kslraw); + DEFINE_RTYPE(kslraw_u); + DEFINE_RTYPE(ksllw); + DEFINE_PI5TYPE(kslliw); + DEFINE_RTYPE(kdmabb); + DEFINE_RTYPE(kdmabt); + DEFINE_RTYPE(kdmatt); + DEFINE_RTYPE(kabsw); + DEFINE_RTYPE(raddw); + DEFINE_RTYPE(uraddw); + DEFINE_RTYPE(rsubw); + DEFINE_RTYPE(ursubw); + DEFINE_RTYPE(msubr32); + DEFINE_RTYPE(ave); + DEFINE_RTYPE(sra_u); + DEFINE_PI5TYPE(srai_u); + DEFINE_PI3TYPE(insb); + DEFINE_RTYPE(maddr32) + + if (isa->get_max_xlen() == 64) { + DEFINE_RTYPE(add32); + DEFINE_RTYPE(radd32); + DEFINE_RTYPE(uradd32); + DEFINE_RTYPE(kadd32); + DEFINE_RTYPE(ukadd32); + DEFINE_RTYPE(sub32); + DEFINE_RTYPE(rsub32); + DEFINE_RTYPE(ursub32); + DEFINE_RTYPE(ksub32); + DEFINE_RTYPE(uksub32); + DEFINE_RTYPE(cras32); + DEFINE_RTYPE(rcras32); + DEFINE_RTYPE(urcras32); + DEFINE_RTYPE(kcras32); + DEFINE_RTYPE(ukcras32); + DEFINE_RTYPE(crsa32); + DEFINE_RTYPE(rcrsa32); + DEFINE_RTYPE(urcrsa32); + DEFINE_RTYPE(kcrsa32); + DEFINE_RTYPE(ukcrsa32); + DEFINE_RTYPE(stas32); + DEFINE_RTYPE(rstas32); + DEFINE_RTYPE(urstas32); + DEFINE_RTYPE(kstas32); + DEFINE_RTYPE(ukstas32); + DEFINE_RTYPE(stsa32); + DEFINE_RTYPE(rstsa32); + DEFINE_RTYPE(urstsa32); + DEFINE_RTYPE(kstsa32); + DEFINE_RTYPE(ukstsa32); + DEFINE_RTYPE(sra32); + DEFINE_PI5TYPE(srai32); + DEFINE_RTYPE(sra32_u); + DEFINE_PI5TYPE(srai32_u); + DEFINE_RTYPE(srl32); + DEFINE_PI5TYPE(srli32); + DEFINE_RTYPE(srl32_u); + DEFINE_PI5TYPE(srli32_u); + DEFINE_RTYPE(sll32); + DEFINE_PI5TYPE(slli32); + DEFINE_RTYPE(ksll32); 
+ DEFINE_PI5TYPE(kslli32); + DEFINE_RTYPE(kslra32); + DEFINE_RTYPE(kslra32_u); + DEFINE_RTYPE(smin32); + DEFINE_RTYPE(umin32); + DEFINE_RTYPE(smax32); + DEFINE_RTYPE(umax32); + DEFINE_R1TYPE(kabs32); + DEFINE_RTYPE(khmbb16); + DEFINE_RTYPE(khmbt16); + DEFINE_RTYPE(khmtt16); + DEFINE_RTYPE(kdmbb16); + DEFINE_RTYPE(kdmbt16); + DEFINE_RTYPE(kdmtt16); + DEFINE_RTYPE(kdmabb16); + DEFINE_RTYPE(kdmabt16); + DEFINE_RTYPE(kdmatt16); + DEFINE_RTYPE(smbt32); + DEFINE_RTYPE(smtt32); + DEFINE_RTYPE(kmabb32); + DEFINE_RTYPE(kmabt32); + DEFINE_RTYPE(kmatt32); + DEFINE_RTYPE(kmda32); + DEFINE_RTYPE(kmxda32); + DEFINE_RTYPE(kmaxda32); + DEFINE_RTYPE(kmads32); + DEFINE_RTYPE(kmadrs32); + DEFINE_RTYPE(kmaxds32); + DEFINE_RTYPE(kmsda32); + DEFINE_RTYPE(kmsxda32); + DEFINE_RTYPE(smds32); + DEFINE_RTYPE(smdrs32); + DEFINE_RTYPE(smxds32); + DEFINE_PI5TYPE(sraiw_u); + DEFINE_RTYPE(pkbb32); + DEFINE_RTYPE(pkbt32); + DEFINE_RTYPE(pktb32); + DEFINE_RTYPE(pktt32); + } + } + + if (isa->extension_enabled(EXT_XZBP)) { + DEFINE_ITYPE_SHIFT(grevi); + DEFINE_ITYPE_SHIFT(gorci); + DEFINE_RTYPE(pack); + DEFINE_RTYPE(packh); + DEFINE_RTYPE(packu); + DEFINE_RTYPE(grev); + DEFINE_RTYPE(gorc); + DEFINE_RTYPE(xperm4); + DEFINE_RTYPE(xperm8); + DEFINE_RTYPE(xperm16); + DEFINE_RTYPE(xperm32); + } + + if (isa->extension_enabled(EXT_XZBP) || + isa->extension_enabled(EXT_XZBE) || + isa->extension_enabled(EXT_XZBF)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(packw); + } + } + + if (isa->extension_enabled(EXT_XZBT)) { + DEFINE_R3TYPE(cmix); + DEFINE_R3TYPE(fsr); + DEFINE_R3TYPE(fsri); + if(isa->get_max_xlen() == 64) { + DEFINE_R3TYPE(fsriw); + DEFINE_R3TYPE(fsrw); + } + } + + if (isa->extension_enabled(EXT_ZICBOM)) { + DISASM_INSN("cbo.clean", cbo_clean, 0, {&xrs1}); + DISASM_INSN("cbo.flush", cbo_flush, 0, {&xrs1}); + DISASM_INSN("cbo.inval", cbo_inval, 0, {&xrs1}); + } + + if (isa->extension_enabled(EXT_ZICBOZ)) { + DISASM_INSN("cbo.zero", cbo_zero, 0, {&xrs1}); + } + + if (isa->extension_enabled(EXT_ZKND) || + isa->extension_enabled(EXT_ZKNE)) { + DISASM_INSN("aes64ks1i", aes64ks1i, 0, {&xrd, &xrs1, &rcon}); + DEFINE_RTYPE(aes64ks2); + } + + if (isa->extension_enabled(EXT_ZKND)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(aes64ds); + DEFINE_RTYPE(aes64dsm); + DEFINE_R1TYPE(aes64im); + } else if (isa->get_max_xlen() == 32) { + DISASM_INSN("aes32dsi", aes32dsi, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("aes32dsmi", aes32dsmi, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + } + + if (isa->extension_enabled(EXT_ZKNE)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(aes64es); + DEFINE_RTYPE(aes64esm); + } else if (isa->get_max_xlen() == 32) { + DISASM_INSN("aes32esi", aes32esi, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("aes32esmi", aes32esmi, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + } + + if (isa->extension_enabled(EXT_ZKNH)) { + DEFINE_R1TYPE(sha256sig0); + DEFINE_R1TYPE(sha256sig1); + DEFINE_R1TYPE(sha256sum0); + DEFINE_R1TYPE(sha256sum1); + if(isa->get_max_xlen() == 64) { + DEFINE_R1TYPE(sha512sig0); + DEFINE_R1TYPE(sha512sig1); + DEFINE_R1TYPE(sha512sum0); + DEFINE_R1TYPE(sha512sum1); + } else if (isa->get_max_xlen() == 32) { + DEFINE_RTYPE(sha512sig0h); + DEFINE_RTYPE(sha512sig0l); + DEFINE_RTYPE(sha512sig1h); + DEFINE_RTYPE(sha512sig1l); + DEFINE_RTYPE(sha512sum0r); + DEFINE_RTYPE(sha512sum1r); + } + } + + if (isa->extension_enabled(EXT_ZKSED)) { + DISASM_INSN("sm4ed", sm4ed, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("sm4ks", sm4ks, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + + if (isa->extension_enabled(EXT_ZKSH)) { + 
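+ // Zksh supplies the two SM3 hash permutation instructions (sm3p0/sm3p1),
+ // each taking a single source register, hence R1TYPE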
DEFINE_R1TYPE(sm3p0); + DEFINE_R1TYPE(sm3p1); + } + +} + +disassembler_t::disassembler_t(const isa_parser_t *isa) +{ + // highest priority: instructions explicitly enabled + add_instructions(isa); + + // next-highest priority: other instructions in same base ISA + std::string fallback_isa_string = std::string("rv") + std::to_string(isa->get_max_xlen()) + + "gcv_zfh_zba_zbb_zbc_zbs_zkn_zkr_zks_xbitmanip"; + isa_parser_t fallback_isa(fallback_isa_string.c_str(), DEFAULT_PRIV); + add_instructions(&fallback_isa); + + // finally: instructions with known opcodes but unknown arguments + add_unknown_insns(this); +} + +const disasm_insn_t* disassembler_t::probe_once(insn_t insn, size_t idx) const +{ + for (size_t j = 0; j < chain[idx].size(); j++) + if(*chain[idx][j] == insn) + return chain[idx][j]; + + return NULL; +} + +const disasm_insn_t* disassembler_t::lookup(insn_t insn) const +{ + if (auto p = probe_once(insn, hash(insn.bits(), MASK1))) + return p; + + if (auto p = probe_once(insn, hash(insn.bits(), MASK2))) + return p; + + return probe_once(insn, HASH_SIZE); +} + +void NOINLINE disassembler_t::add_insn(disasm_insn_t* insn) +{ + size_t idx = + (insn->get_mask() & MASK1) == MASK1 ? hash(insn->get_match(), MASK1) : + (insn->get_mask() & MASK2) == MASK2 ? hash(insn->get_match(), MASK2) : + HASH_SIZE; + + chain[idx].push_back(insn); +} + +disassembler_t::~disassembler_t() +{ + for (size_t i = 0; i < HASH_SIZE+1; i++) + for (size_t j = 0; j < chain[i].size(); j++) + delete chain[i][j]; +} diff --git a/vendor/riscv-isa-sim/disasm/disasm.mk.in b/vendor/riscv-isa-sim/disasm/disasm.mk.in new file mode 100644 index 00000000..9eafb12f --- /dev/null +++ b/vendor/riscv-isa-sim/disasm/disasm.mk.in @@ -0,0 +1,5 @@ +disasm_srcs = \ + disasm.cc \ + regnames.cc \ + +disasm_install_lib = yes diff --git a/vendor/riscv-isa-sim/disasm/regnames.cc b/vendor/riscv-isa-sim/disasm/regnames.cc new file mode 100644 index 00000000..0a7fd4d2 --- /dev/null +++ b/vendor/riscv-isa-sim/disasm/regnames.cc @@ -0,0 +1,33 @@ +// See LICENSE for license details. + +#include "disasm.h" + +const char* xpr_name[] = { + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6" +}; + +const char* fpr_name[] = { + "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", + "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11" +}; + +const char* vr_name[] = { + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +}; + +const char* csr_name(int which) { + switch (which) { + #define DECLARE_CSR(name, number) case number: return #name; + #include "encoding.h" + #undef DECLARE_CSR + } + return "unknown-csr"; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt.ac b/vendor/riscv-isa-sim/fdt/fdt.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/fdt/fdt.c b/vendor/riscv-isa-sim/fdt/fdt.c new file mode 100644 index 00000000..16fd0612 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include "fdt.h" +#include "libfdt.h" + +#include "libfdt_internal.h" + +/* + * Minimal sanity check for a read-only tree. fdt_ro_probe_() checks + * that the given buffer contains what appears to be a flattened + * device tree with sane information in its header. + */ +int32_t fdt_ro_probe_(const void *fdt) +{ + uint32_t totalsize = fdt_totalsize(fdt); + + if (fdt_magic(fdt) == FDT_MAGIC) { + /* Complete tree */ + if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION) + return -FDT_ERR_BADVERSION; + if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION) + return -FDT_ERR_BADVERSION; + } else if (fdt_magic(fdt) == FDT_SW_MAGIC) { + /* Unfinished sequential-write blob */ + if (fdt_size_dt_struct(fdt) == 0) + return -FDT_ERR_BADSTATE; + } else { + return -FDT_ERR_BADMAGIC; + } + + if (totalsize < INT32_MAX) + return totalsize; + else + return -FDT_ERR_TRUNCATED; +} + +static int check_off_(uint32_t hdrsize, uint32_t totalsize, uint32_t off) +{ + return (off >= hdrsize) && (off <= totalsize); +} + +static int check_block_(uint32_t hdrsize, uint32_t totalsize, + uint32_t base, uint32_t size) +{ + if (!check_off_(hdrsize, totalsize, base)) + return 0; /* block start out of bounds */ + if ((base + size) < base) + return 0; /* overflow */ + if (!check_off_(hdrsize, totalsize, base + size)) + return 0; /* block end out of bounds */ + return 1; +} + +size_t fdt_header_size_(uint32_t version) +{ + if (version <= 1) + return FDT_V1_SIZE; + else if (version <= 2) + return FDT_V2_SIZE; + else if (version <= 3) + return FDT_V3_SIZE; + else if (version <= 16) + return FDT_V16_SIZE; + else + return FDT_V17_SIZE; +} + +int fdt_check_header(const void *fdt) +{ + size_t hdrsize; + + if (fdt_magic(fdt) != FDT_MAGIC) + return -FDT_ERR_BADMAGIC; + hdrsize = fdt_header_size(fdt); + if ((fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION) + || (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)) + return -FDT_ERR_BADVERSION; + if (fdt_version(fdt) < fdt_last_comp_version(fdt)) + return -FDT_ERR_BADVERSION; + + if ((fdt_totalsize(fdt) < hdrsize) + || (fdt_totalsize(fdt) > INT_MAX)) + return -FDT_ERR_TRUNCATED; + + /* Bounds check memrsv block */ + if (!check_off_(hdrsize, fdt_totalsize(fdt), fdt_off_mem_rsvmap(fdt))) + return -FDT_ERR_TRUNCATED; + + /* Bounds check structure block */ + if (fdt_version(fdt) < 17) { + if (!check_off_(hdrsize, fdt_totalsize(fdt), + fdt_off_dt_struct(fdt))) + return -FDT_ERR_TRUNCATED; + } else { + if (!check_block_(hdrsize, fdt_totalsize(fdt), + fdt_off_dt_struct(fdt), + fdt_size_dt_struct(fdt))) + return -FDT_ERR_TRUNCATED; + } + + /* Bounds check strings block */ + if (!check_block_(hdrsize, fdt_totalsize(fdt), + fdt_off_dt_strings(fdt), fdt_size_dt_strings(fdt))) + return -FDT_ERR_TRUNCATED; + + return 0; +} + +const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len) +{ + unsigned absoffset = offset + fdt_off_dt_struct(fdt); + + if ((absoffset < offset) + || ((absoffset + len) < absoffset) + || (absoffset + len) > fdt_totalsize(fdt)) + return NULL; + + if (fdt_version(fdt) >= 0x11) + if (((offset + len) < offset) + || ((offset + len) > fdt_size_dt_struct(fdt))) + return NULL; + + return fdt_offset_ptr_(fdt, offset); +} + +uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset) +{ + const fdt32_t *tagp, *lenp; + uint32_t tag; + int offset = startoffset; + const char *p; + + *nextoffset = -FDT_ERR_TRUNCATED; + tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE); + if (!tagp) + return FDT_END; /* premature end */ + 
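+ /* FDT cells are stored big-endian; convert the tag before dispatching on it */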
tag = fdt32_to_cpu(*tagp); + offset += FDT_TAGSIZE; + + *nextoffset = -FDT_ERR_BADSTRUCTURE; + switch (tag) { + case FDT_BEGIN_NODE: + /* skip name */ + do { + p = fdt_offset_ptr(fdt, offset++, 1); + } while (p && (*p != '\0')); + if (!p) + return FDT_END; /* premature end */ + break; + + case FDT_PROP: + lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp)); + if (!lenp) + return FDT_END; /* premature end */ + /* skip-name offset, length and value */ + offset += sizeof(struct fdt_property) - FDT_TAGSIZE + + fdt32_to_cpu(*lenp); + if (fdt_version(fdt) < 0x10 && fdt32_to_cpu(*lenp) >= 8 && + ((offset - fdt32_to_cpu(*lenp)) % 8) != 0) + offset += 4; + break; + + case FDT_END: + case FDT_END_NODE: + case FDT_NOP: + break; + + default: + return FDT_END; + } + + if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset)) + return FDT_END; /* premature end */ + + *nextoffset = FDT_TAGALIGN(offset); + return tag; +} + +int fdt_check_node_offset_(const void *fdt, int offset) +{ + if ((offset < 0) || (offset % FDT_TAGSIZE) + || (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE)) + return -FDT_ERR_BADOFFSET; + + return offset; +} + +int fdt_check_prop_offset_(const void *fdt, int offset) +{ + if ((offset < 0) || (offset % FDT_TAGSIZE) + || (fdt_next_tag(fdt, offset, &offset) != FDT_PROP)) + return -FDT_ERR_BADOFFSET; + + return offset; +} + +int fdt_next_node(const void *fdt, int offset, int *depth) +{ + int nextoffset = 0; + uint32_t tag; + + if (offset >= 0) + if ((nextoffset = fdt_check_node_offset_(fdt, offset)) < 0) + return nextoffset; + + do { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + + switch (tag) { + case FDT_PROP: + case FDT_NOP: + break; + + case FDT_BEGIN_NODE: + if (depth) + (*depth)++; + break; + + case FDT_END_NODE: + if (depth && ((--(*depth)) < 0)) + return nextoffset; + break; + + case FDT_END: + if ((nextoffset >= 0) + || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth)) + return -FDT_ERR_NOTFOUND; + else + return nextoffset; + } + } while (tag != FDT_BEGIN_NODE); + + return offset; +} + +int fdt_first_subnode(const void *fdt, int offset) +{ + int depth = 0; + + offset = fdt_next_node(fdt, offset, &depth); + if (offset < 0 || depth != 1) + return -FDT_ERR_NOTFOUND; + + return offset; +} + +int fdt_next_subnode(const void *fdt, int offset) +{ + int depth = 1; + + /* + * With respect to the parent, the depth of the next subnode will be + * the same as the last. + */ + do { + offset = fdt_next_node(fdt, offset, &depth); + if (offset < 0 || depth < 1) + return -FDT_ERR_NOTFOUND; + } while (depth > 1); + + return offset; +} + +const char *fdt_find_string_(const char *strtab, int tabsize, const char *s) +{ + int len = strlen(s) + 1; + const char *last = strtab + tabsize - len; + const char *p; + + for (p = strtab; p <= last; p++) + if (memcmp(p, s, len) == 0) + return p; + return NULL; +} + +int fdt_move(const void *fdt, void *buf, int bufsize) +{ + FDT_RO_PROBE(fdt); + + if (fdt_totalsize(fdt) > bufsize) + return -FDT_ERR_NOSPACE; + + memmove(buf, fdt, fdt_totalsize(fdt)); + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt.h b/vendor/riscv-isa-sim/fdt/fdt.h new file mode 100644 index 00000000..f2e68807 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +#ifndef FDT_H +#define FDT_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * Copyright 2012 Kim Phillips, Freescale Semiconductor. 
+ */ + +#ifndef __ASSEMBLY__ + +struct fdt_header { + fdt32_t magic; /* magic word FDT_MAGIC */ + fdt32_t totalsize; /* total size of DT block */ + fdt32_t off_dt_struct; /* offset to structure */ + fdt32_t off_dt_strings; /* offset to strings */ + fdt32_t off_mem_rsvmap; /* offset to memory reserve map */ + fdt32_t version; /* format version */ + fdt32_t last_comp_version; /* last compatible version */ + + /* version 2 fields below */ + fdt32_t boot_cpuid_phys; /* Which physical CPU id we're + booting on */ + /* version 3 fields below */ + fdt32_t size_dt_strings; /* size of the strings block */ + + /* version 17 fields below */ + fdt32_t size_dt_struct; /* size of the structure block */ +}; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +#endif /* !__ASSEMBLY */ + +#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */ +#define FDT_TAGSIZE sizeof(fdt32_t) + +#define FDT_BEGIN_NODE 0x1 /* Start node: full name */ +#define FDT_END_NODE 0x2 /* End node */ +#define FDT_PROP 0x3 /* Property: name off, + size, content */ +#define FDT_NOP 0x4 /* nop */ +#define FDT_END 0x9 + +#define FDT_V1_SIZE (7*sizeof(fdt32_t)) +#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(fdt32_t)) +#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(fdt32_t)) +#define FDT_V16_SIZE FDT_V3_SIZE +#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(fdt32_t)) + +#endif /* FDT_H */ diff --git a/vendor/riscv-isa-sim/fdt/fdt.mk.in b/vendor/riscv-isa-sim/fdt/fdt.mk.in new file mode 100644 index 00000000..273375ef --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt.mk.in @@ -0,0 +1,17 @@ +fdt_subproject_deps = \ + +fdt_hdrs = \ + fdt.h \ + libfdt.h \ + libfdt_env.h \ + +fdt_c_srcs = \ + fdt.c \ + fdt_ro.c \ + fdt_wip.c \ + fdt_sw.c \ + fdt_rw.c \ + fdt_strerror.c \ + fdt_empty_tree.c \ + fdt_addresses.c \ + fdt_overlay.c \ diff --git a/vendor/riscv-isa-sim/fdt/fdt_addresses.c b/vendor/riscv-isa-sim/fdt/fdt_addresses.c new file mode 100644 index 00000000..9a82cd0b --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_addresses.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2014 David Gibson + * Copyright (C) 2018 embedded brains GmbH + */ +#include "libfdt_env.h" + +#include <fdt.h> +#include <libfdt.h> + +#include "libfdt_internal.h" + +static int fdt_cells(const void *fdt, int nodeoffset, const char *name) +{ + const fdt32_t *c; + uint32_t val; + int len; + + c = fdt_getprop(fdt, nodeoffset, name, &len); + if (!c) + return len; + + if (len != sizeof(*c)) + return -FDT_ERR_BADNCELLS; + + val = fdt32_to_cpu(*c); + if (val > FDT_MAX_NCELLS) + return -FDT_ERR_BADNCELLS; + + return (int)val; +} + +int fdt_address_cells(const void *fdt, int nodeoffset) +{ + int val; + + val = fdt_cells(fdt, nodeoffset, "#address-cells"); + if (val == 0) + return -FDT_ERR_BADNCELLS; + if (val == -FDT_ERR_NOTFOUND) + return 2; + return val; +} + +int fdt_size_cells(const void *fdt, int nodeoffset) +{ + int val; + + val = fdt_cells(fdt, nodeoffset, "#size-cells"); + if (val == -FDT_ERR_NOTFOUND) + return 1; + return val; +} + +/* This function assumes that [address|size]_cells is 1 or 2 */ +int fdt_appendprop_addrrange(void *fdt, int parent, int nodeoffset, + const char *name, uint64_t addr, uint64_t size) +{ + int addr_cells, size_cells, ret; + uint8_t data[sizeof(fdt64_t) * 2], *prop; + + ret = fdt_address_cells(fdt,
parent); + if (ret < 0) + return ret; + addr_cells = ret; + + ret = fdt_size_cells(fdt, parent); + if (ret < 0) + return ret; + size_cells = ret; + + /* check validity of address */ + prop = data; + if (addr_cells == 1) { + if ((addr > UINT32_MAX) || ((UINT32_MAX + 1 - addr) < size)) + return -FDT_ERR_BADVALUE; + + fdt32_st(prop, (uint32_t)addr); + } else if (addr_cells == 2) { + fdt64_st(prop, addr); + } else { + return -FDT_ERR_BADNCELLS; + } + + /* check validity of size */ + prop += addr_cells * sizeof(fdt32_t); + if (size_cells == 1) { + if (size > UINT32_MAX) + return -FDT_ERR_BADVALUE; + + fdt32_st(prop, (uint32_t)size); + } else if (size_cells == 2) { + fdt64_st(prop, size); + } else { + return -FDT_ERR_BADNCELLS; + } + + return fdt_appendprop(fdt, nodeoffset, name, data, + (addr_cells + size_cells) * sizeof(fdt32_t)); +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_empty_tree.c b/vendor/riscv-isa-sim/fdt/fdt_empty_tree.c new file mode 100644 index 00000000..49d54d44 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_empty_tree.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2012 David Gibson, IBM Corporation. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +int fdt_create_empty_tree(void *buf, int bufsize) +{ + int err; + + err = fdt_create(buf, bufsize); + if (err) + return err; + + err = fdt_finish_reservemap(buf); + if (err) + return err; + + err = fdt_begin_node(buf, ""); + if (err) + return err; + + err = fdt_end_node(buf); + if (err) + return err; + + err = fdt_finish(buf); + if (err) + return err; + + return fdt_open_into(buf, buf, bufsize); +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_overlay.c b/vendor/riscv-isa-sim/fdt/fdt_overlay.c new file mode 100644 index 00000000..be718733 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_overlay.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2016 Free Electrons + * Copyright (C) 2016 NextThing Co. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +/** + * overlay_get_target_phandle - retrieves the target phandle of a fragment + * @fdto: pointer to the device tree overlay blob + * @fragment: node offset of the fragment in the overlay + * + * overlay_get_target_phandle() retrieves the target phandle of an + * overlay fragment when that fragment uses a phandle (target + * property) instead of a path (target-path property). 
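+ *
+ * As an illustrative sketch (node and label names here are made up,
+ * not taken from this file), a fragment may use either form:
+ *
+ *     fragment@0 {
+ *             target = <&some_label>;
+ *             __overlay__ { ... };
+ *     };
+ *
+ *     fragment@1 {
+ *             target-path = "/soc/serial@10000000";
+ *             __overlay__ { ... };
+ *     };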
+ * + * returns: + * the phandle pointed by the target property + * 0, if the phandle was not found + * -1, if the phandle was malformed + */ +static uint32_t overlay_get_target_phandle(const void *fdto, int fragment) +{ + const fdt32_t *val; + int len; + + val = fdt_getprop(fdto, fragment, "target", &len); + if (!val) + return 0; + + if ((len != sizeof(*val)) || (fdt32_to_cpu(*val) == (uint32_t)-1)) + return (uint32_t)-1; + + return fdt32_to_cpu(*val); +} + +/** + * overlay_get_target - retrieves the offset of a fragment's target + * @fdt: Base device tree blob + * @fdto: Device tree overlay blob + * @fragment: node offset of the fragment in the overlay + * @pathp: pointer which receives the path of the target (or NULL) + * + * overlay_get_target() retrieves the target offset in the base + * device tree of a fragment, no matter how the actual targeting is + * done (through a phandle or a path) + * + * returns: + * the targeted node offset in the base device tree + * Negative error code on error + */ +static int overlay_get_target(const void *fdt, const void *fdto, + int fragment, char const **pathp) +{ + uint32_t phandle; + const char *path = NULL; + int path_len = 0, ret; + + /* Try first to do a phandle based lookup */ + phandle = overlay_get_target_phandle(fdto, fragment); + if (phandle == (uint32_t)-1) + return -FDT_ERR_BADPHANDLE; + + /* no phandle, try path */ + if (!phandle) { + /* And then a path based lookup */ + path = fdt_getprop(fdto, fragment, "target-path", &path_len); + if (path) + ret = fdt_path_offset(fdt, path); + else + ret = path_len; + } else + ret = fdt_node_offset_by_phandle(fdt, phandle); + + /* + * If we haven't found either a target or a + * target-path property in a node that contains a + * __overlay__ subnode (we wouldn't be called + * otherwise), consider it a improperly written + * overlay + */ + if (ret < 0 && path_len == -FDT_ERR_NOTFOUND) + ret = -FDT_ERR_BADOVERLAY; + + /* return on error */ + if (ret < 0) + return ret; + + /* return pointer to path (if available) */ + if (pathp) + *pathp = path ? path : NULL; + + return ret; +} + +/** + * overlay_phandle_add_offset - Increases a phandle by an offset + * @fdt: Base device tree blob + * @node: Device tree overlay blob + * @name: Name of the property to modify (phandle or linux,phandle) + * @delta: offset to apply + * + * overlay_phandle_add_offset() increments a node phandle by a given + * offset. + * + * returns: + * 0 on success. + * Negative error code on error + */ +static int overlay_phandle_add_offset(void *fdt, int node, + const char *name, uint32_t delta) +{ + const fdt32_t *val; + uint32_t adj_val; + int len; + + val = fdt_getprop(fdt, node, name, &len); + if (!val) + return len; + + if (len != sizeof(*val)) + return -FDT_ERR_BADPHANDLE; + + adj_val = fdt32_to_cpu(*val); + if ((adj_val + delta) < adj_val) + return -FDT_ERR_NOPHANDLES; + + adj_val += delta; + if (adj_val == (uint32_t)-1) + return -FDT_ERR_NOPHANDLES; + + return fdt_setprop_inplace_u32(fdt, node, name, adj_val); +} + +/** + * overlay_adjust_node_phandles - Offsets the phandles of a node + * @fdto: Device tree overlay blob + * @node: Offset of the node we want to adjust + * @delta: Offset to shift the phandles of + * + * overlay_adjust_node_phandles() adds a constant to all the phandles + * of a given node. This is mainly use as part of the overlay + * application process, when we want to update all the overlay + * phandles to not conflict with the overlays of the base device tree. 
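+ *
+ * For example, with a delta of 5 (say, the highest phandle already in
+ * use by the base tree), overlay phandles 1, 2 and 3 become 6, 7 and
+ * 8, keeping the two numbering spaces disjoint.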
+ * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_adjust_node_phandles(void *fdto, int node, + uint32_t delta) +{ + int child; + int ret; + + ret = overlay_phandle_add_offset(fdto, node, "phandle", delta); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + + ret = overlay_phandle_add_offset(fdto, node, "linux,phandle", delta); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + + fdt_for_each_subnode(child, fdto, node) { + ret = overlay_adjust_node_phandles(fdto, child, delta); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_adjust_local_phandles - Adjust the phandles of a whole overlay + * @fdto: Device tree overlay blob + * @delta: Offset to shift the phandles of + * + * overlay_adjust_local_phandles() adds a constant to all the + * phandles of an overlay. This is mainly use as part of the overlay + * application process, when we want to update all the overlay + * phandles to not conflict with the overlays of the base device tree. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_adjust_local_phandles(void *fdto, uint32_t delta) +{ + /* + * Start adjusting the phandles from the overlay root + */ + return overlay_adjust_node_phandles(fdto, 0, delta); +} + +/** + * overlay_update_local_node_references - Adjust the overlay references + * @fdto: Device tree overlay blob + * @tree_node: Node offset of the node to operate on + * @fixup_node: Node offset of the matching local fixups node + * @delta: Offset to shift the phandles of + * + * overlay_update_local_nodes_references() update the phandles + * pointing to a node within the device tree overlay by adding a + * constant delta. + * + * This is mainly used as part of a device tree application process, + * where you want the device tree overlays phandles to not conflict + * with the ones from the base device tree before merging them. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_update_local_node_references(void *fdto, + int tree_node, + int fixup_node, + uint32_t delta) +{ + int fixup_prop; + int fixup_child; + int ret; + + fdt_for_each_property_offset(fixup_prop, fdto, fixup_node) { + const fdt32_t *fixup_val; + const char *tree_val; + const char *name; + int fixup_len; + int tree_len; + int i; + + fixup_val = fdt_getprop_by_offset(fdto, fixup_prop, + &name, &fixup_len); + if (!fixup_val) + return fixup_len; + + if (fixup_len % sizeof(uint32_t)) + return -FDT_ERR_BADOVERLAY; + + tree_val = fdt_getprop(fdto, tree_node, name, &tree_len); + if (!tree_val) { + if (tree_len == -FDT_ERR_NOTFOUND) + return -FDT_ERR_BADOVERLAY; + + return tree_len; + } + + for (i = 0; i < (fixup_len / sizeof(uint32_t)); i++) { + fdt32_t adj_val; + uint32_t poffset; + + poffset = fdt32_to_cpu(fixup_val[i]); + + /* + * phandles to fixup can be unaligned. + * + * Use a memcpy for the architectures that do + * not support unaligned accesses. 
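+			 * (A plain fdt32_t load at tree_val + poffset
+			 * could fault on strict-alignment machines,
+			 * since the fixup offsets are byte-granular.)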
+ */ + memcpy(&adj_val, tree_val + poffset, sizeof(adj_val)); + + adj_val = cpu_to_fdt32(fdt32_to_cpu(adj_val) + delta); + + ret = fdt_setprop_inplace_namelen_partial(fdto, + tree_node, + name, + strlen(name), + poffset, + &adj_val, + sizeof(adj_val)); + if (ret == -FDT_ERR_NOSPACE) + return -FDT_ERR_BADOVERLAY; + + if (ret) + return ret; + } + } + + fdt_for_each_subnode(fixup_child, fdto, fixup_node) { + const char *fixup_child_name = fdt_get_name(fdto, fixup_child, + NULL); + int tree_child; + + tree_child = fdt_subnode_offset(fdto, tree_node, + fixup_child_name); + if (tree_child == -FDT_ERR_NOTFOUND) + return -FDT_ERR_BADOVERLAY; + if (tree_child < 0) + return tree_child; + + ret = overlay_update_local_node_references(fdto, + tree_child, + fixup_child, + delta); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_update_local_references - Adjust the overlay references + * @fdto: Device tree overlay blob + * @delta: Offset to shift the phandles of + * + * overlay_update_local_references() update all the phandles pointing + * to a node within the device tree overlay by adding a constant + * delta to not conflict with the base overlay. + * + * This is mainly used as part of a device tree application process, + * where you want the device tree overlays phandles to not conflict + * with the ones from the base device tree before merging them. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_update_local_references(void *fdto, uint32_t delta) +{ + int fixups; + + fixups = fdt_path_offset(fdto, "/__local_fixups__"); + if (fixups < 0) { + /* There's no local phandles to adjust, bail out */ + if (fixups == -FDT_ERR_NOTFOUND) + return 0; + + return fixups; + } + + /* + * Update our local references from the root of the tree + */ + return overlay_update_local_node_references(fdto, 0, fixups, + delta); +} + +/** + * overlay_fixup_one_phandle - Set an overlay phandle to the base one + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * @symbols_off: Node offset of the symbols node in the base device tree + * @path: Path to a node holding a phandle in the overlay + * @path_len: number of path characters to consider + * @name: Name of the property holding the phandle reference in the overlay + * @name_len: number of name characters to consider + * @poffset: Offset within the overlay property where the phandle is stored + * @label: Label of the node referenced by the phandle + * + * overlay_fixup_one_phandle() resolves an overlay phandle pointing to + * a node in the base device tree. + * + * This is part of the device tree overlay application process, when + * you want all the phandles in the overlay to point to the actual + * base dt nodes. 
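+ *
+ * Concretely: @label is looked up in the base tree's /__symbols__/
+ * node to obtain the target path, the phandle of the node at that
+ * path is fetched, and its big-endian encoding is written into the
+ * overlay property named @name at byte offset @poffset.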
+ * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_fixup_one_phandle(void *fdt, void *fdto, + int symbols_off, + const char *path, uint32_t path_len, + const char *name, uint32_t name_len, + int poffset, const char *label) +{ + const char *symbol_path; + uint32_t phandle; + fdt32_t phandle_prop; + int symbol_off, fixup_off; + int prop_len; + + if (symbols_off < 0) + return symbols_off; + + symbol_path = fdt_getprop(fdt, symbols_off, label, + &prop_len); + if (!symbol_path) + return prop_len; + + symbol_off = fdt_path_offset(fdt, symbol_path); + if (symbol_off < 0) + return symbol_off; + + phandle = fdt_get_phandle(fdt, symbol_off); + if (!phandle) + return -FDT_ERR_NOTFOUND; + + fixup_off = fdt_path_offset_namelen(fdto, path, path_len); + if (fixup_off == -FDT_ERR_NOTFOUND) + return -FDT_ERR_BADOVERLAY; + if (fixup_off < 0) + return fixup_off; + + phandle_prop = cpu_to_fdt32(phandle); + return fdt_setprop_inplace_namelen_partial(fdto, fixup_off, + name, name_len, poffset, + &phandle_prop, + sizeof(phandle_prop)); +}; + +/** + * overlay_fixup_phandle - Set an overlay phandle to the base one + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * @symbols_off: Node offset of the symbols node in the base device tree + * @property: Property offset in the overlay holding the list of fixups + * + * overlay_fixup_phandle() resolves all the overlay phandles pointed + * to in a __fixups__ property, and updates them to match the phandles + * in use in the base device tree. + * + * This is part of the device tree overlay application process, when + * you want all the phandles in the overlay to point to the actual + * base dt nodes. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_fixup_phandle(void *fdt, void *fdto, int symbols_off, + int property) +{ + const char *value; + const char *label; + int len; + + value = fdt_getprop_by_offset(fdto, property, + &label, &len); + if (!value) { + if (len == -FDT_ERR_NOTFOUND) + return -FDT_ERR_INTERNAL; + + return len; + } + + do { + const char *path, *name, *fixup_end; + const char *fixup_str = value; + uint32_t path_len, name_len; + uint32_t fixup_len; + char *sep, *endptr; + int poffset, ret; + + fixup_end = memchr(value, '\0', len); + if (!fixup_end) + return -FDT_ERR_BADOVERLAY; + fixup_len = fixup_end - fixup_str; + + len -= fixup_len + 1; + value += fixup_len + 1; + + path = fixup_str; + sep = memchr(fixup_str, ':', fixup_len); + if (!sep || *sep != ':') + return -FDT_ERR_BADOVERLAY; + + path_len = sep - path; + if (path_len == (fixup_len - 1)) + return -FDT_ERR_BADOVERLAY; + + fixup_len -= path_len + 1; + name = sep + 1; + sep = memchr(name, ':', fixup_len); + if (!sep || *sep != ':') + return -FDT_ERR_BADOVERLAY; + + name_len = sep - name; + if (!name_len) + return -FDT_ERR_BADOVERLAY; + + poffset = strtoul(sep + 1, &endptr, 10); + if ((*endptr != '\0') || (endptr <= (sep + 1))) + return -FDT_ERR_BADOVERLAY; + + ret = overlay_fixup_one_phandle(fdt, fdto, symbols_off, + path, path_len, name, name_len, + poffset, label); + if (ret) + return ret; + } while (len > 0); + + return 0; +} + +/** + * overlay_fixup_phandles - Resolve the overlay phandles to the base + * device tree + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * + * overlay_fixup_phandles() resolves all the overlay phandles pointing + * to nodes in the base device tree. 
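+ *
+ * Each property of the overlay's /__fixups__/ node is named after a
+ * label known to the base tree's /__symbols__/ node, and its value is
+ * a list of NUL-terminated "path:property:offset" strings recording
+ * every place the matching phandle has to be patched in.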
+ * + * This is one of the steps of the device tree overlay application + * process, when you want all the phandles in the overlay to point to + * the actual base dt nodes. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_fixup_phandles(void *fdt, void *fdto) +{ + int fixups_off, symbols_off; + int property; + + /* We can have overlays without any fixups */ + fixups_off = fdt_path_offset(fdto, "/__fixups__"); + if (fixups_off == -FDT_ERR_NOTFOUND) + return 0; /* nothing to do */ + if (fixups_off < 0) + return fixups_off; + + /* And base DTs without symbols */ + symbols_off = fdt_path_offset(fdt, "/__symbols__"); + if ((symbols_off < 0 && (symbols_off != -FDT_ERR_NOTFOUND))) + return symbols_off; + + fdt_for_each_property_offset(property, fdto, fixups_off) { + int ret; + + ret = overlay_fixup_phandle(fdt, fdto, symbols_off, property); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_apply_node - Merges a node into the base device tree + * @fdt: Base Device Tree blob + * @target: Node offset in the base device tree to apply the fragment to + * @fdto: Device tree overlay blob + * @node: Node offset in the overlay holding the changes to merge + * + * overlay_apply_node() merges a node into a target base device tree + * node pointed. + * + * This is part of the final step in the device tree overlay + * application process, when all the phandles have been adjusted and + * resolved and you just have to merge overlay into the base device + * tree. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_apply_node(void *fdt, int target, + void *fdto, int node) +{ + int property; + int subnode; + + fdt_for_each_property_offset(property, fdto, node) { + const char *name; + const void *prop; + int prop_len; + int ret; + + prop = fdt_getprop_by_offset(fdto, property, &name, + &prop_len); + if (prop_len == -FDT_ERR_NOTFOUND) + return -FDT_ERR_INTERNAL; + if (prop_len < 0) + return prop_len; + + ret = fdt_setprop(fdt, target, name, prop, prop_len); + if (ret) + return ret; + } + + fdt_for_each_subnode(subnode, fdto, node) { + const char *name = fdt_get_name(fdto, subnode, NULL); + int nnode; + int ret; + + nnode = fdt_add_subnode(fdt, target, name); + if (nnode == -FDT_ERR_EXISTS) { + nnode = fdt_subnode_offset(fdt, target, name); + if (nnode == -FDT_ERR_NOTFOUND) + return -FDT_ERR_INTERNAL; + } + + if (nnode < 0) + return nnode; + + ret = overlay_apply_node(fdt, nnode, fdto, subnode); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_merge - Merge an overlay into its base device tree + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * + * overlay_merge() merges an overlay into its base device tree. + * + * This is the next to last step in the device tree overlay application + * process, when all the phandles have been adjusted and resolved and + * you just have to merge overlay into the base device tree. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_merge(void *fdt, void *fdto) +{ + int fragment; + + fdt_for_each_subnode(fragment, fdto, 0) { + int overlay; + int target; + int ret; + + /* + * Each fragments will have an __overlay__ node. 
If + * they don't, it's not supposed to be merged + */ + overlay = fdt_subnode_offset(fdto, fragment, "__overlay__"); + if (overlay == -FDT_ERR_NOTFOUND) + continue; + + if (overlay < 0) + return overlay; + + target = overlay_get_target(fdt, fdto, fragment, NULL); + if (target < 0) + return target; + + ret = overlay_apply_node(fdt, target, fdto, overlay); + if (ret) + return ret; + } + + return 0; +} + +static int get_path_len(const void *fdt, int nodeoffset) +{ + int len = 0, namelen; + const char *name; + + FDT_RO_PROBE(fdt); + + for (;;) { + name = fdt_get_name(fdt, nodeoffset, &namelen); + if (!name) + return namelen; + + /* root? we're done */ + if (namelen == 0) + break; + + nodeoffset = fdt_parent_offset(fdt, nodeoffset); + if (nodeoffset < 0) + return nodeoffset; + len += namelen + 1; + } + + /* in case of root pretend it's "/" */ + if (len == 0) + len++; + return len; +} + +/** + * overlay_symbol_update - Update the symbols of base tree after a merge + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * + * overlay_symbol_update() updates the symbols of the base tree with the + * symbols of the applied overlay + * + * This is the last step in the device tree overlay application + * process, allowing the reference of overlay symbols by subsequent + * overlay operations. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_symbol_update(void *fdt, void *fdto) +{ + int root_sym, ov_sym, prop, path_len, fragment, target; + int len, frag_name_len, ret, rel_path_len; + const char *s, *e; + const char *path; + const char *name; + const char *frag_name; + const char *rel_path; + const char *target_path; + char *buf; + void *p; + + ov_sym = fdt_subnode_offset(fdto, 0, "__symbols__"); + + /* if no overlay symbols exist no problem */ + if (ov_sym < 0) + return 0; + + root_sym = fdt_subnode_offset(fdt, 0, "__symbols__"); + + /* it no root symbols exist we should create them */ + if (root_sym == -FDT_ERR_NOTFOUND) + root_sym = fdt_add_subnode(fdt, 0, "__symbols__"); + + /* any error is fatal now */ + if (root_sym < 0) + return root_sym; + + /* iterate over each overlay symbol */ + fdt_for_each_property_offset(prop, fdto, ov_sym) { + path = fdt_getprop_by_offset(fdto, prop, &name, &path_len); + if (!path) + return path_len; + + /* verify it's a string property (terminated by a single \0) */ + if (path_len < 1 || memchr(path, '\0', path_len) != &path[path_len - 1]) + return -FDT_ERR_BADVALUE; + + /* keep end marker to avoid strlen() */ + e = path + path_len; + + if (*path != '/') + return -FDT_ERR_BADVALUE; + + /* get fragment name first */ + s = strchr(path + 1, '/'); + if (!s) { + /* Symbol refers to something that won't end + * up in the target tree */ + continue; + } + + frag_name = path + 1; + frag_name_len = s - path - 1; + + /* verify format; safe since "s" lies in \0 terminated prop */ + len = sizeof("/__overlay__/") - 1; + if ((e - s) > len && (memcmp(s, "/__overlay__/", len) == 0)) { + /* //__overlay__/ */ + rel_path = s + len; + rel_path_len = e - rel_path; + } else if ((e - s) == len + && (memcmp(s, "/__overlay__", len - 1) == 0)) { + /* //__overlay__ */ + rel_path = ""; + rel_path_len = 0; + } else { + /* Symbol refers to something that won't end + * up in the target tree */ + continue; + } + + /* find the fragment index in which the symbol lies */ + ret = fdt_subnode_offset_namelen(fdto, 0, frag_name, + frag_name_len); + /* not found? 
*/ + if (ret < 0) + return -FDT_ERR_BADOVERLAY; + fragment = ret; + + /* an __overlay__ subnode must exist */ + ret = fdt_subnode_offset(fdto, fragment, "__overlay__"); + if (ret < 0) + return -FDT_ERR_BADOVERLAY; + + /* get the target of the fragment */ + ret = overlay_get_target(fdt, fdto, fragment, &target_path); + if (ret < 0) + return ret; + target = ret; + + /* if we have a target path use */ + if (!target_path) { + ret = get_path_len(fdt, target); + if (ret < 0) + return ret; + len = ret; + } else { + len = strlen(target_path); + } + + ret = fdt_setprop_placeholder(fdt, root_sym, name, + len + (len > 1) + rel_path_len + 1, &p); + if (ret < 0) + return ret; + + if (!target_path) { + /* again in case setprop_placeholder changed it */ + ret = overlay_get_target(fdt, fdto, fragment, &target_path); + if (ret < 0) + return ret; + target = ret; + } + + buf = p; + if (len > 1) { /* target is not root */ + if (!target_path) { + ret = fdt_get_path(fdt, target, buf, len + 1); + if (ret < 0) + return ret; + } else + memcpy(buf, target_path, len + 1); + + } else + len--; + + buf[len] = '/'; + memcpy(buf + len + 1, rel_path, rel_path_len); + buf[len + 1 + rel_path_len] = '\0'; + } + + return 0; +} + +int fdt_overlay_apply(void *fdt, void *fdto) +{ + uint32_t delta; + int ret; + + FDT_RO_PROBE(fdt); + FDT_RO_PROBE(fdto); + + ret = fdt_find_max_phandle(fdt, &delta); + if (ret) + goto err; + + ret = overlay_adjust_local_phandles(fdto, delta); + if (ret) + goto err; + + ret = overlay_update_local_references(fdto, delta); + if (ret) + goto err; + + ret = overlay_fixup_phandles(fdt, fdto); + if (ret) + goto err; + + ret = overlay_merge(fdt, fdto); + if (ret) + goto err; + + ret = overlay_symbol_update(fdt, fdto); + if (ret) + goto err; + + /* + * The overlay has been damaged, erase its magic. + */ + fdt_set_magic(fdto, ~0); + + return 0; + +err: + /* + * The overlay might have been damaged, erase its magic. + */ + fdt_set_magic(fdto, ~0); + + /* + * The base device tree might have been damaged, erase its + * magic. + */ + fdt_set_magic(fdt, ~0); + + return ret; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_ro.c b/vendor/riscv-isa-sim/fdt/fdt_ro.c new file mode 100644 index 00000000..a5c2797c --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_ro.c @@ -0,0 +1,898 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +static int fdt_nodename_eq_(const void *fdt, int offset, + const char *s, int len) +{ + int olen; + const char *p = fdt_get_name(fdt, offset, &olen); + + if (!p || olen < len) + /* short match */ + return 0; + + if (memcmp(p, s, len) != 0) + return 0; + + if (p[len] == '\0') + return 1; + else if (!memchr(s, '@', len) && (p[len] == '@')) + return 1; + else + return 0; +} + +const char *fdt_get_string(const void *fdt, int stroffset, int *lenp) +{ + int32_t totalsize = fdt_ro_probe_(fdt); + uint32_t absoffset = stroffset + fdt_off_dt_strings(fdt); + size_t len; + int err; + const char *s, *n; + + err = totalsize; + if (totalsize < 0) + goto fail; + + err = -FDT_ERR_BADOFFSET; + if (absoffset >= totalsize) + goto fail; + len = totalsize - absoffset; + + if (fdt_magic(fdt) == FDT_MAGIC) { + if (stroffset < 0) + goto fail; + if (fdt_version(fdt) >= 17) { + if (stroffset >= fdt_size_dt_strings(fdt)) + goto fail; + if ((fdt_size_dt_strings(fdt) - stroffset) < len) + len = fdt_size_dt_strings(fdt) - stroffset; + } + } else if (fdt_magic(fdt) == FDT_SW_MAGIC) { + if ((stroffset >= 0) + || (stroffset < -fdt_size_dt_strings(fdt))) + goto fail; + if ((-stroffset) < len) + len = -stroffset; + } else { + err = -FDT_ERR_INTERNAL; + goto fail; + } + + s = (const char *)fdt + absoffset; + n = memchr(s, '\0', len); + if (!n) { + /* missing terminating NULL */ + err = -FDT_ERR_TRUNCATED; + goto fail; + } + + if (lenp) + *lenp = n - s; + return s; + +fail: + if (lenp) + *lenp = err; + return NULL; +} + +const char *fdt_string(const void *fdt, int stroffset) +{ + return fdt_get_string(fdt, stroffset, NULL); +} + +static int fdt_string_eq_(const void *fdt, int stroffset, + const char *s, int len) +{ + int slen; + const char *p = fdt_get_string(fdt, stroffset, &slen); + + return p && (slen == len) && (memcmp(p, s, len) == 0); +} + +int fdt_find_max_phandle(const void *fdt, uint32_t *phandle) +{ + uint32_t max = 0; + int offset = -1; + + while (true) { + uint32_t value; + + offset = fdt_next_node(fdt, offset, NULL); + if (offset < 0) { + if (offset == -FDT_ERR_NOTFOUND) + break; + + return offset; + } + + value = fdt_get_phandle(fdt, offset); + + if (value > max) + max = value; + } + + if (phandle) + *phandle = max; + + return 0; +} + +int fdt_generate_phandle(const void *fdt, uint32_t *phandle) +{ + uint32_t max; + int err; + + err = fdt_find_max_phandle(fdt, &max); + if (err < 0) + return err; + + if (max == FDT_MAX_PHANDLE) + return -FDT_ERR_NOPHANDLES; + + if (phandle) + *phandle = max + 1; + + return 0; +} + +static const struct fdt_reserve_entry *fdt_mem_rsv(const void *fdt, int n) +{ + int offset = n * sizeof(struct fdt_reserve_entry); + int absoffset = fdt_off_mem_rsvmap(fdt) + offset; + + if (absoffset < fdt_off_mem_rsvmap(fdt)) + return NULL; + if (absoffset > fdt_totalsize(fdt) - sizeof(struct fdt_reserve_entry)) + return NULL; + return fdt_mem_rsv_(fdt, n); +} + +int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size) +{ + const struct fdt_reserve_entry *re; + + FDT_RO_PROBE(fdt); + re = fdt_mem_rsv(fdt, n); + if (!re) + return -FDT_ERR_BADOFFSET; + + *address = fdt64_ld(&re->address); + *size = fdt64_ld(&re->size); + return 0; +} + +int fdt_num_mem_rsv(const void *fdt) +{ + int i; + const struct fdt_reserve_entry *re; + + for (i = 0; (re = fdt_mem_rsv(fdt, i)) != NULL; i++) { + if (fdt64_ld(&re->size) == 0) + return i; + } + return -FDT_ERR_TRUNCATED; +} + +static int nextprop_(const void 
*fdt, int offset) +{ + uint32_t tag; + int nextoffset; + + do { + tag = fdt_next_tag(fdt, offset, &nextoffset); + + switch (tag) { + case FDT_END: + if (nextoffset >= 0) + return -FDT_ERR_BADSTRUCTURE; + else + return nextoffset; + + case FDT_PROP: + return offset; + } + offset = nextoffset; + } while (tag == FDT_NOP); + + return -FDT_ERR_NOTFOUND; +} + +int fdt_subnode_offset_namelen(const void *fdt, int offset, + const char *name, int namelen) +{ + int depth; + + FDT_RO_PROBE(fdt); + + for (depth = 0; + (offset >= 0) && (depth >= 0); + offset = fdt_next_node(fdt, offset, &depth)) + if ((depth == 1) + && fdt_nodename_eq_(fdt, offset, name, namelen)) + return offset; + + if (depth < 0) + return -FDT_ERR_NOTFOUND; + return offset; /* error */ +} + +int fdt_subnode_offset(const void *fdt, int parentoffset, + const char *name) +{ + return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name)); +} + +int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen) +{ + const char *end = path + namelen; + const char *p = path; + int offset = 0; + + FDT_RO_PROBE(fdt); + + /* see if we have an alias */ + if (*path != '/') { + const char *q = memchr(path, '/', end - p); + + if (!q) + q = end; + + p = fdt_get_alias_namelen(fdt, p, q - p); + if (!p) + return -FDT_ERR_BADPATH; + offset = fdt_path_offset(fdt, p); + + p = q; + } + + while (p < end) { + const char *q; + + while (*p == '/') { + p++; + if (p == end) + return offset; + } + q = memchr(p, '/', end - p); + if (! q) + q = end; + + offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p); + if (offset < 0) + return offset; + + p = q; + } + + return offset; +} + +int fdt_path_offset(const void *fdt, const char *path) +{ + return fdt_path_offset_namelen(fdt, path, strlen(path)); +} + +const char *fdt_get_name(const void *fdt, int nodeoffset, int *len) +{ + const struct fdt_node_header *nh = fdt_offset_ptr_(fdt, nodeoffset); + const char *nameptr; + int err; + + if (((err = fdt_ro_probe_(fdt)) < 0) + || ((err = fdt_check_node_offset_(fdt, nodeoffset)) < 0)) + goto fail; + + nameptr = nh->name; + + if (fdt_version(fdt) < 0x10) { + /* + * For old FDT versions, match the naming conventions of V16: + * give only the leaf name (after all /). The actual tree + * contents are loosely checked. + */ + const char *leaf; + leaf = strrchr(nameptr, '/'); + if (leaf == NULL) { + err = -FDT_ERR_BADSTRUCTURE; + goto fail; + } + nameptr = leaf+1; + } + + if (len) + *len = strlen(nameptr); + + return nameptr; + + fail: + if (len) + *len = err; + return NULL; +} + +int fdt_first_property_offset(const void *fdt, int nodeoffset) +{ + int offset; + + if ((offset = fdt_check_node_offset_(fdt, nodeoffset)) < 0) + return offset; + + return nextprop_(fdt, offset); +} + +int fdt_next_property_offset(const void *fdt, int offset) +{ + if ((offset = fdt_check_prop_offset_(fdt, offset)) < 0) + return offset; + + return nextprop_(fdt, offset); +} + +static const struct fdt_property *fdt_get_property_by_offset_(const void *fdt, + int offset, + int *lenp) +{ + int err; + const struct fdt_property *prop; + + if ((err = fdt_check_prop_offset_(fdt, offset)) < 0) { + if (lenp) + *lenp = err; + return NULL; + } + + prop = fdt_offset_ptr_(fdt, offset); + + if (lenp) + *lenp = fdt32_ld(&prop->len); + + return prop; +} + +const struct fdt_property *fdt_get_property_by_offset(const void *fdt, + int offset, + int *lenp) +{ + /* Prior to version 16, properties may need realignment + * and this API does not work. fdt_getprop_*() will, however. 
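+	 * (In pre-v16 blobs a property value 8 bytes or longer was
+	 * padded to start on an 8-byte boundary, so the data may
+	 * actually sit 4 bytes past prop->data; fdt_getprop_namelen()
+	 * compensates for that.)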
*/ + + if (fdt_version(fdt) < 0x10) { + if (lenp) + *lenp = -FDT_ERR_BADVERSION; + return NULL; + } + + return fdt_get_property_by_offset_(fdt, offset, lenp); +} + +static const struct fdt_property *fdt_get_property_namelen_(const void *fdt, + int offset, + const char *name, + int namelen, + int *lenp, + int *poffset) +{ + for (offset = fdt_first_property_offset(fdt, offset); + (offset >= 0); + (offset = fdt_next_property_offset(fdt, offset))) { + const struct fdt_property *prop; + + if (!(prop = fdt_get_property_by_offset_(fdt, offset, lenp))) { + offset = -FDT_ERR_INTERNAL; + break; + } + if (fdt_string_eq_(fdt, fdt32_ld(&prop->nameoff), + name, namelen)) { + if (poffset) + *poffset = offset; + return prop; + } + } + + if (lenp) + *lenp = offset; + return NULL; +} + + +const struct fdt_property *fdt_get_property_namelen(const void *fdt, + int offset, + const char *name, + int namelen, int *lenp) +{ + /* Prior to version 16, properties may need realignment + * and this API does not work. fdt_getprop_*() will, however. */ + if (fdt_version(fdt) < 0x10) { + if (lenp) + *lenp = -FDT_ERR_BADVERSION; + return NULL; + } + + return fdt_get_property_namelen_(fdt, offset, name, namelen, lenp, + NULL); +} + + +const struct fdt_property *fdt_get_property(const void *fdt, + int nodeoffset, + const char *name, int *lenp) +{ + return fdt_get_property_namelen(fdt, nodeoffset, name, + strlen(name), lenp); +} + +const void *fdt_getprop_namelen(const void *fdt, int nodeoffset, + const char *name, int namelen, int *lenp) +{ + int poffset; + const struct fdt_property *prop; + + prop = fdt_get_property_namelen_(fdt, nodeoffset, name, namelen, lenp, + &poffset); + if (!prop) + return NULL; + + /* Handle realignment */ + if (fdt_version(fdt) < 0x10 && (poffset + sizeof(*prop)) % 8 && + fdt32_ld(&prop->len) >= 8) + return prop->data + 4; + return prop->data; +} + +const void *fdt_getprop_by_offset(const void *fdt, int offset, + const char **namep, int *lenp) +{ + const struct fdt_property *prop; + + prop = fdt_get_property_by_offset_(fdt, offset, lenp); + if (!prop) + return NULL; + if (namep) { + const char *name; + int namelen; + name = fdt_get_string(fdt, fdt32_ld(&prop->nameoff), + &namelen); + if (!name) { + if (lenp) + *lenp = namelen; + return NULL; + } + *namep = name; + } + + /* Handle realignment */ + if (fdt_version(fdt) < 0x10 && (offset + sizeof(*prop)) % 8 && + fdt32_ld(&prop->len) >= 8) + return prop->data + 4; + return prop->data; +} + +const void *fdt_getprop(const void *fdt, int nodeoffset, + const char *name, int *lenp) +{ + return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp); +} + +uint32_t fdt_get_phandle(const void *fdt, int nodeoffset) +{ + const fdt32_t *php; + int len; + + /* FIXME: This is a bit sub-optimal, since we potentially scan + * over all the properties twice. 
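+	 * (once for "phandle" and, if that is absent or malformed,
+	 * again for the legacy "linux,phandle" name)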
*/ + php = fdt_getprop(fdt, nodeoffset, "phandle", &len); + if (!php || (len != sizeof(*php))) { + php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len); + if (!php || (len != sizeof(*php))) + return 0; + } + + return fdt32_ld(php); +} + +const char *fdt_get_alias_namelen(const void *fdt, + const char *name, int namelen) +{ + int aliasoffset; + + aliasoffset = fdt_path_offset(fdt, "/aliases"); + if (aliasoffset < 0) + return NULL; + + return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL); +} + +const char *fdt_get_alias(const void *fdt, const char *name) +{ + return fdt_get_alias_namelen(fdt, name, strlen(name)); +} + +int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen) +{ + int pdepth = 0, p = 0; + int offset, depth, namelen; + const char *name; + + FDT_RO_PROBE(fdt); + + if (buflen < 2) + return -FDT_ERR_NOSPACE; + + for (offset = 0, depth = 0; + (offset >= 0) && (offset <= nodeoffset); + offset = fdt_next_node(fdt, offset, &depth)) { + while (pdepth > depth) { + do { + p--; + } while (buf[p-1] != '/'); + pdepth--; + } + + if (pdepth >= depth) { + name = fdt_get_name(fdt, offset, &namelen); + if (!name) + return namelen; + if ((p + namelen + 1) <= buflen) { + memcpy(buf + p, name, namelen); + p += namelen; + buf[p++] = '/'; + pdepth++; + } + } + + if (offset == nodeoffset) { + if (pdepth < (depth + 1)) + return -FDT_ERR_NOSPACE; + + if (p > 1) /* special case so that root path is "/", not "" */ + p--; + buf[p] = '\0'; + return 0; + } + } + + if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0)) + return -FDT_ERR_BADOFFSET; + else if (offset == -FDT_ERR_BADOFFSET) + return -FDT_ERR_BADSTRUCTURE; + + return offset; /* error from fdt_next_node() */ +} + +int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset, + int supernodedepth, int *nodedepth) +{ + int offset, depth; + int supernodeoffset = -FDT_ERR_INTERNAL; + + FDT_RO_PROBE(fdt); + + if (supernodedepth < 0) + return -FDT_ERR_NOTFOUND; + + for (offset = 0, depth = 0; + (offset >= 0) && (offset <= nodeoffset); + offset = fdt_next_node(fdt, offset, &depth)) { + if (depth == supernodedepth) + supernodeoffset = offset; + + if (offset == nodeoffset) { + if (nodedepth) + *nodedepth = depth; + + if (supernodedepth > depth) + return -FDT_ERR_NOTFOUND; + else + return supernodeoffset; + } + } + + if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0)) + return -FDT_ERR_BADOFFSET; + else if (offset == -FDT_ERR_BADOFFSET) + return -FDT_ERR_BADSTRUCTURE; + + return offset; /* error from fdt_next_node() */ +} + +int fdt_node_depth(const void *fdt, int nodeoffset) +{ + int nodedepth; + int err; + + err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth); + if (err) + return (err < 0) ? err : -FDT_ERR_INTERNAL; + return nodedepth; +} + +int fdt_parent_offset(const void *fdt, int nodeoffset) +{ + int nodedepth = fdt_node_depth(fdt, nodeoffset); + + if (nodedepth < 0) + return nodedepth; + return fdt_supernode_atdepth_offset(fdt, nodeoffset, + nodedepth - 1, NULL); +} + +int fdt_node_offset_by_prop_value(const void *fdt, int startoffset, + const char *propname, + const void *propval, int proplen) +{ + int offset; + const void *val; + int len; + + FDT_RO_PROBE(fdt); + + /* FIXME: The algorithm here is pretty horrible: we scan each + * property of a node in fdt_getprop(), then if that didn't + * find what we want, we scan over them again making our way + * to the next node. Still it's the easiest to implement + * approach; performance can come later. 
*/ + for (offset = fdt_next_node(fdt, startoffset, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + val = fdt_getprop(fdt, offset, propname, &len); + if (val && (len == proplen) + && (memcmp(val, propval, len) == 0)) + return offset; + } + + return offset; /* error from fdt_next_node() */ +} + +int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle) +{ + int offset; + + if ((phandle == 0) || (phandle == -1)) + return -FDT_ERR_BADPHANDLE; + + FDT_RO_PROBE(fdt); + + /* FIXME: The algorithm here is pretty horrible: we + * potentially scan each property of a node in + * fdt_get_phandle(), then if that didn't find what + * we want, we scan over them again making our way to the next + * node. Still it's the easiest to implement approach; + * performance can come later. */ + for (offset = fdt_next_node(fdt, -1, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + if (fdt_get_phandle(fdt, offset) == phandle) + return offset; + } + + return offset; /* error from fdt_next_node() */ +} + +int fdt_stringlist_contains(const char *strlist, int listlen, const char *str) +{ + int len = strlen(str); + const char *p; + + while (listlen >= len) { + if (memcmp(str, strlist, len+1) == 0) + return 1; + p = memchr(strlist, '\0', listlen); + if (!p) + return 0; /* malformed strlist.. */ + listlen -= (p-strlist) + 1; + strlist = p + 1; + } + return 0; +} + +int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property) +{ + const char *list, *end; + int length, count = 0; + + list = fdt_getprop(fdt, nodeoffset, property, &length); + if (!list) + return length; + + end = list + length; + + while (list < end) { + length = strnlen(list, end - list) + 1; + + /* Abort if the last string isn't properly NUL-terminated. */ + if (list + length > end) + return -FDT_ERR_BADVALUE; + + list += length; + count++; + } + + return count; +} + +int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property, + const char *string) +{ + int length, len, idx = 0; + const char *list, *end; + + list = fdt_getprop(fdt, nodeoffset, property, &length); + if (!list) + return length; + + len = strlen(string) + 1; + end = list + length; + + while (list < end) { + length = strnlen(list, end - list) + 1; + + /* Abort if the last string isn't properly NUL-terminated. */ + if (list + length > end) + return -FDT_ERR_BADVALUE; + + if (length == len && memcmp(list, string, length) == 0) + return idx; + + list += length; + idx++; + } + + return -FDT_ERR_NOTFOUND; +} + +const char *fdt_stringlist_get(const void *fdt, int nodeoffset, + const char *property, int idx, + int *lenp) +{ + const char *list, *end; + int length; + + list = fdt_getprop(fdt, nodeoffset, property, &length); + if (!list) { + if (lenp) + *lenp = length; + + return NULL; + } + + end = list + length; + + while (list < end) { + length = strnlen(list, end - list) + 1; + + /* Abort if the last string isn't properly NUL-terminated. 
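+		 * (i.e. strnlen() reached the end of the property value
+		 * without finding a terminator)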
*/ + if (list + length > end) { + if (lenp) + *lenp = -FDT_ERR_BADVALUE; + + return NULL; + } + + if (idx == 0) { + if (lenp) + *lenp = length - 1; + + return list; + } + + list += length; + idx--; + } + + if (lenp) + *lenp = -FDT_ERR_NOTFOUND; + + return NULL; +} + +int fdt_node_check_compatible(const void *fdt, int nodeoffset, + const char *compatible) +{ + const void *prop; + int len; + + prop = fdt_getprop(fdt, nodeoffset, "compatible", &len); + if (!prop) + return len; + + return !fdt_stringlist_contains(prop, len, compatible); +} + +int fdt_node_offset_by_compatible(const void *fdt, int startoffset, + const char *compatible) +{ + int offset, err; + + FDT_RO_PROBE(fdt); + + /* FIXME: The algorithm here is pretty horrible: we scan each + * property of a node in fdt_node_check_compatible(), then if + * that didn't find what we want, we scan over them again + * making our way to the next node. Still it's the easiest to + * implement approach; performance can come later. */ + for (offset = fdt_next_node(fdt, startoffset, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + err = fdt_node_check_compatible(fdt, offset, compatible); + if ((err < 0) && (err != -FDT_ERR_NOTFOUND)) + return err; + else if (err == 0) + return offset; + } + + return offset; /* error from fdt_next_node() */ +} + +int fdt_check_full(const void *fdt, size_t bufsize) +{ + int err; + int num_memrsv; + int offset, nextoffset = 0; + uint32_t tag; + unsigned depth = 0; + const void *prop; + const char *propname; + + if (bufsize < FDT_V1_SIZE) + return -FDT_ERR_TRUNCATED; + err = fdt_check_header(fdt); + if (err != 0) + return err; + if (bufsize < fdt_totalsize(fdt)) + return -FDT_ERR_TRUNCATED; + + num_memrsv = fdt_num_mem_rsv(fdt); + if (num_memrsv < 0) + return num_memrsv; + + while (1) { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + + if (nextoffset < 0) + return nextoffset; + + switch (tag) { + case FDT_NOP: + break; + + case FDT_END: + if (depth != 0) + return -FDT_ERR_BADSTRUCTURE; + return 0; + + case FDT_BEGIN_NODE: + depth++; + if (depth > INT_MAX) + return -FDT_ERR_BADSTRUCTURE; + break; + + case FDT_END_NODE: + if (depth == 0) + return -FDT_ERR_BADSTRUCTURE; + depth--; + break; + + case FDT_PROP: + prop = fdt_getprop_by_offset(fdt, offset, &propname, + &err); + if (!prop) + return err; + break; + + default: + return -FDT_ERR_INTERNAL; + } + } +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_rw.c b/vendor/riscv-isa-sim/fdt/fdt_rw.c new file mode 100644 index 00000000..8795947c --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_rw.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +static int fdt_blocks_misordered_(const void *fdt, + int mem_rsv_size, int struct_size) +{ + return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8)) + || (fdt_off_dt_struct(fdt) < + (fdt_off_mem_rsvmap(fdt) + mem_rsv_size)) + || (fdt_off_dt_strings(fdt) < + (fdt_off_dt_struct(fdt) + struct_size)) + || (fdt_totalsize(fdt) < + (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))); +} + +static int fdt_rw_probe_(void *fdt) +{ + FDT_RO_PROBE(fdt); + + if (fdt_version(fdt) < 17) + return -FDT_ERR_BADVERSION; + if (fdt_blocks_misordered_(fdt, sizeof(struct fdt_reserve_entry), + fdt_size_dt_struct(fdt))) + return -FDT_ERR_BADLAYOUT; + if (fdt_version(fdt) > 17) + fdt_set_version(fdt, 17); + + return 0; +} + +#define FDT_RW_PROBE(fdt) \ + { \ + int err_; \ + if ((err_ = fdt_rw_probe_(fdt)) != 0) \ + return err_; \ + } + +static inline int fdt_data_size_(void *fdt) +{ + return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); +} + +static int fdt_splice_(void *fdt, void *splicepoint, int oldlen, int newlen) +{ + char *p = splicepoint; + char *end = (char *)fdt + fdt_data_size_(fdt); + + if (((p + oldlen) < p) || ((p + oldlen) > end)) + return -FDT_ERR_BADOFFSET; + if ((p < (char *)fdt) || ((end - oldlen + newlen) < (char *)fdt)) + return -FDT_ERR_BADOFFSET; + if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt))) + return -FDT_ERR_NOSPACE; + memmove(p + newlen, p + oldlen, end - p - oldlen); + return 0; +} + +static int fdt_splice_mem_rsv_(void *fdt, struct fdt_reserve_entry *p, + int oldn, int newn) +{ + int delta = (newn - oldn) * sizeof(*p); + int err; + err = fdt_splice_(fdt, p, oldn * sizeof(*p), newn * sizeof(*p)); + if (err) + return err; + fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta); + fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); + return 0; +} + +static int fdt_splice_struct_(void *fdt, void *p, + int oldlen, int newlen) +{ + int delta = newlen - oldlen; + int err; + + if ((err = fdt_splice_(fdt, p, oldlen, newlen))) + return err; + + fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta); + fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); + return 0; +} + +/* Must only be used to roll back in case of error */ +static void fdt_del_last_string_(void *fdt, const char *s) +{ + int newlen = strlen(s) + 1; + + fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) - newlen); +} + +static int fdt_splice_string_(void *fdt, int newlen) +{ + void *p = (char *)fdt + + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); + int err; + + if ((err = fdt_splice_(fdt, p, 0, newlen))) + return err; + + fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen); + return 0; +} + +static int fdt_find_add_string_(void *fdt, const char *s, int *allocated) +{ + char *strtab = (char *)fdt + fdt_off_dt_strings(fdt); + const char *p; + char *new; + int len = strlen(s) + 1; + int err; + + *allocated = 0; + + p = fdt_find_string_(strtab, fdt_size_dt_strings(fdt), s); + if (p) + /* found it */ + return (p - strtab); + + new = strtab + fdt_size_dt_strings(fdt); + err = fdt_splice_string_(fdt, len); + if (err) + return err; + + *allocated = 1; + + memcpy(new, s, len); + return (new - strtab); +} + +int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size) +{ + struct fdt_reserve_entry *re; + int err; + + FDT_RW_PROBE(fdt); + + re = fdt_mem_rsv_w_(fdt, fdt_num_mem_rsv(fdt)); + err = fdt_splice_mem_rsv_(fdt, re, 0, 1); + if (err) + return err; + 
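+	/* fill in the freshly spliced entry in big-endian on-disk order */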
+ re->address = cpu_to_fdt64(address); + re->size = cpu_to_fdt64(size); + return 0; +} + +int fdt_del_mem_rsv(void *fdt, int n) +{ + struct fdt_reserve_entry *re = fdt_mem_rsv_w_(fdt, n); + + FDT_RW_PROBE(fdt); + + if (n >= fdt_num_mem_rsv(fdt)) + return -FDT_ERR_NOTFOUND; + + return fdt_splice_mem_rsv_(fdt, re, 1, 0); +} + +static int fdt_resize_property_(void *fdt, int nodeoffset, const char *name, + int len, struct fdt_property **prop) +{ + int oldlen; + int err; + + *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); + if (!*prop) + return oldlen; + + if ((err = fdt_splice_struct_(fdt, (*prop)->data, FDT_TAGALIGN(oldlen), + FDT_TAGALIGN(len)))) + return err; + + (*prop)->len = cpu_to_fdt32(len); + return 0; +} + +static int fdt_add_property_(void *fdt, int nodeoffset, const char *name, + int len, struct fdt_property **prop) +{ + int proplen; + int nextoffset; + int namestroff; + int err; + int allocated; + + if ((nextoffset = fdt_check_node_offset_(fdt, nodeoffset)) < 0) + return nextoffset; + + namestroff = fdt_find_add_string_(fdt, name, &allocated); + if (namestroff < 0) + return namestroff; + + *prop = fdt_offset_ptr_w_(fdt, nextoffset); + proplen = sizeof(**prop) + FDT_TAGALIGN(len); + + err = fdt_splice_struct_(fdt, *prop, 0, proplen); + if (err) { + if (allocated) + fdt_del_last_string_(fdt, name); + return err; + } + + (*prop)->tag = cpu_to_fdt32(FDT_PROP); + (*prop)->nameoff = cpu_to_fdt32(namestroff); + (*prop)->len = cpu_to_fdt32(len); + return 0; +} + +int fdt_set_name(void *fdt, int nodeoffset, const char *name) +{ + char *namep; + int oldlen, newlen; + int err; + + FDT_RW_PROBE(fdt); + + namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen); + if (!namep) + return oldlen; + + newlen = strlen(name); + + err = fdt_splice_struct_(fdt, namep, FDT_TAGALIGN(oldlen+1), + FDT_TAGALIGN(newlen+1)); + if (err) + return err; + + memcpy(namep, name, newlen+1); + return 0; +} + +int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name, + int len, void **prop_data) +{ + struct fdt_property *prop; + int err; + + FDT_RW_PROBE(fdt); + + err = fdt_resize_property_(fdt, nodeoffset, name, len, &prop); + if (err == -FDT_ERR_NOTFOUND) + err = fdt_add_property_(fdt, nodeoffset, name, len, &prop); + if (err) + return err; + + *prop_data = prop->data; + return 0; +} + +int fdt_setprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len) +{ + void *prop_data; + int err; + + err = fdt_setprop_placeholder(fdt, nodeoffset, name, len, &prop_data); + if (err) + return err; + + if (len) + memcpy(prop_data, val, len); + return 0; +} + +int fdt_appendprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len) +{ + struct fdt_property *prop; + int err, oldlen, newlen; + + FDT_RW_PROBE(fdt); + + prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); + if (prop) { + newlen = len + oldlen; + err = fdt_splice_struct_(fdt, prop->data, + FDT_TAGALIGN(oldlen), + FDT_TAGALIGN(newlen)); + if (err) + return err; + prop->len = cpu_to_fdt32(newlen); + memcpy(prop->data + oldlen, val, len); + } else { + err = fdt_add_property_(fdt, nodeoffset, name, len, &prop); + if (err) + return err; + memcpy(prop->data, val, len); + } + return 0; +} + +int fdt_delprop(void *fdt, int nodeoffset, const char *name) +{ + struct fdt_property *prop; + int len, proplen; + + FDT_RW_PROBE(fdt); + + prop = fdt_get_property_w(fdt, nodeoffset, name, &len); + if (!prop) + return len; + + proplen = sizeof(*prop) + FDT_TAGALIGN(len); + return fdt_splice_struct_(fdt, prop, 
proplen, 0); +} + +int fdt_add_subnode_namelen(void *fdt, int parentoffset, + const char *name, int namelen) +{ + struct fdt_node_header *nh; + int offset, nextoffset; + int nodelen; + int err; + uint32_t tag; + fdt32_t *endtag; + + FDT_RW_PROBE(fdt); + + offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen); + if (offset >= 0) + return -FDT_ERR_EXISTS; + else if (offset != -FDT_ERR_NOTFOUND) + return offset; + + /* Try to place the new node after the parent's properties */ + fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */ + do { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + } while ((tag == FDT_PROP) || (tag == FDT_NOP)); + + nh = fdt_offset_ptr_w_(fdt, offset); + nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE; + + err = fdt_splice_struct_(fdt, nh, 0, nodelen); + if (err) + return err; + + nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); + memset(nh->name, 0, FDT_TAGALIGN(namelen+1)); + memcpy(nh->name, name, namelen); + endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE); + *endtag = cpu_to_fdt32(FDT_END_NODE); + + return offset; +} + +int fdt_add_subnode(void *fdt, int parentoffset, const char *name) +{ + return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name)); +} + +int fdt_del_node(void *fdt, int nodeoffset) +{ + int endoffset; + + FDT_RW_PROBE(fdt); + + endoffset = fdt_node_end_offset_(fdt, nodeoffset); + if (endoffset < 0) + return endoffset; + + return fdt_splice_struct_(fdt, fdt_offset_ptr_w_(fdt, nodeoffset), + endoffset - nodeoffset, 0); +} + +static void fdt_packblocks_(const char *old, char *new, + int mem_rsv_size, int struct_size) +{ + int mem_rsv_off, struct_off, strings_off; + + mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8); + struct_off = mem_rsv_off + mem_rsv_size; + strings_off = struct_off + struct_size; + + memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size); + fdt_set_off_mem_rsvmap(new, mem_rsv_off); + + memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size); + fdt_set_off_dt_struct(new, struct_off); + fdt_set_size_dt_struct(new, struct_size); + + memmove(new + strings_off, old + fdt_off_dt_strings(old), + fdt_size_dt_strings(old)); + fdt_set_off_dt_strings(new, strings_off); + fdt_set_size_dt_strings(new, fdt_size_dt_strings(old)); +} + +int fdt_open_into(const void *fdt, void *buf, int bufsize) +{ + int err; + int mem_rsv_size, struct_size; + int newsize; + const char *fdtstart = fdt; + const char *fdtend = fdtstart + fdt_totalsize(fdt); + char *tmp; + + FDT_RO_PROBE(fdt); + + mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) + * sizeof(struct fdt_reserve_entry); + + if (fdt_version(fdt) >= 17) { + struct_size = fdt_size_dt_struct(fdt); + } else { + struct_size = 0; + while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END) + ; + if (struct_size < 0) + return struct_size; + } + + if (!fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) { + /* no further work necessary */ + err = fdt_move(fdt, buf, bufsize); + if (err) + return err; + fdt_set_version(buf, 17); + fdt_set_size_dt_struct(buf, struct_size); + fdt_set_totalsize(buf, bufsize); + return 0; + } + + /* Need to reorder */ + newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size + + struct_size + fdt_size_dt_strings(fdt); + + if (bufsize < newsize) + return -FDT_ERR_NOSPACE; + + /* First attempt to build converted tree at beginning of buffer */ + tmp = buf; + /* But if that overlaps with the old tree... 
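+	 * (packing at the start of buf would clobber old-tree bytes we
+	 * still need to read), build it just past the old tree instead,
+	 * then slide the result down to the start of buf.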
*/ + if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) { + /* Try right after the old tree instead */ + tmp = (char *)(uintptr_t)fdtend; + if ((tmp + newsize) > ((char *)buf + bufsize)) + return -FDT_ERR_NOSPACE; + } + + fdt_packblocks_(fdt, tmp, mem_rsv_size, struct_size); + memmove(buf, tmp, newsize); + + fdt_set_magic(buf, FDT_MAGIC); + fdt_set_totalsize(buf, bufsize); + fdt_set_version(buf, 17); + fdt_set_last_comp_version(buf, 16); + fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt)); + + return 0; +} + +int fdt_pack(void *fdt) +{ + int mem_rsv_size; + + FDT_RW_PROBE(fdt); + + mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) + * sizeof(struct fdt_reserve_entry); + fdt_packblocks_(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt)); + fdt_set_totalsize(fdt, fdt_data_size_(fdt)); + + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_strerror.c b/vendor/riscv-isa-sim/fdt/fdt_strerror.c new file mode 100644 index 00000000..768db66e --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_strerror.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +struct fdt_errtabent { + const char *str; +}; + +#define FDT_ERRTABENT(val) \ + [(val)] = { .str = #val, } + +static struct fdt_errtabent fdt_errtable[] = { + FDT_ERRTABENT(FDT_ERR_NOTFOUND), + FDT_ERRTABENT(FDT_ERR_EXISTS), + FDT_ERRTABENT(FDT_ERR_NOSPACE), + + FDT_ERRTABENT(FDT_ERR_BADOFFSET), + FDT_ERRTABENT(FDT_ERR_BADPATH), + FDT_ERRTABENT(FDT_ERR_BADPHANDLE), + FDT_ERRTABENT(FDT_ERR_BADSTATE), + + FDT_ERRTABENT(FDT_ERR_TRUNCATED), + FDT_ERRTABENT(FDT_ERR_BADMAGIC), + FDT_ERRTABENT(FDT_ERR_BADVERSION), + FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE), + FDT_ERRTABENT(FDT_ERR_BADLAYOUT), + FDT_ERRTABENT(FDT_ERR_INTERNAL), + FDT_ERRTABENT(FDT_ERR_BADNCELLS), + FDT_ERRTABENT(FDT_ERR_BADVALUE), + FDT_ERRTABENT(FDT_ERR_BADOVERLAY), + FDT_ERRTABENT(FDT_ERR_NOPHANDLES), + FDT_ERRTABENT(FDT_ERR_BADFLAGS), +}; +#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0])) + +const char *fdt_strerror(int errval) +{ + if (errval > 0) + return ""; + else if (errval == 0) + return ""; + else if (errval > -FDT_ERRTABSIZE) { + const char *s = fdt_errtable[-errval].str; + + if (s) + return s; + } + + return ""; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_sw.c b/vendor/riscv-isa-sim/fdt/fdt_sw.c new file mode 100644 index 00000000..76bea22f --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_sw.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include <fdt.h> +#include <libfdt.h> + +#include "libfdt_internal.h" + +static int fdt_sw_probe_(void *fdt) +{ + if (fdt_magic(fdt) == FDT_MAGIC) + return -FDT_ERR_BADSTATE; + else if (fdt_magic(fdt) != FDT_SW_MAGIC) + return -FDT_ERR_BADMAGIC; + return 0; +} + +#define FDT_SW_PROBE(fdt) \ + { \ + int err; \ + if ((err = fdt_sw_probe_(fdt)) != 0) \ + return err; \ + } + +/* 'memrsv' state: Initial state after fdt_create() + * + * Allowed functions: + * fdt_add_reservemap_entry() + * fdt_finish_reservemap() [moves to 'struct' state] + */ +static int fdt_sw_probe_memrsv_(void *fdt) +{ + int err = fdt_sw_probe_(fdt); + if (err) + return err; + + if (fdt_off_dt_strings(fdt) != 0) + return -FDT_ERR_BADSTATE; + return 0; +} + +#define FDT_SW_PROBE_MEMRSV(fdt) \ + { \ + int err; \ + if ((err = fdt_sw_probe_memrsv_(fdt)) != 0) \ + return err; \ + } + +/* 'struct' state: Enter this state after fdt_finish_reservemap() + * + * Allowed functions: + * fdt_begin_node() + * fdt_end_node() + * fdt_property*() + * fdt_finish() [moves to 'complete' state] + */ +static int fdt_sw_probe_struct_(void *fdt) +{ + int err = fdt_sw_probe_(fdt); + if (err) + return err; + + if (fdt_off_dt_strings(fdt) != fdt_totalsize(fdt)) + return -FDT_ERR_BADSTATE; + return 0; +} + +#define FDT_SW_PROBE_STRUCT(fdt) \ + { \ + int err; \ + if ((err = fdt_sw_probe_struct_(fdt)) != 0) \ + return err; \ + } + +static inline uint32_t sw_flags(void *fdt) +{ + /* assert: (fdt_magic(fdt) == FDT_SW_MAGIC) */ + return fdt_last_comp_version(fdt); +} + +/* 'complete' state: Enter this state after fdt_finish() + * + * Allowed functions: none + */ + +static void *fdt_grab_space_(void *fdt, size_t len) +{ + int offset = fdt_size_dt_struct(fdt); + int spaceleft; + + spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt) + - fdt_size_dt_strings(fdt); + + if ((offset + len < offset) || (offset + len > spaceleft)) + return NULL; + + fdt_set_size_dt_struct(fdt, offset + len); + return fdt_offset_ptr_w_(fdt, offset); +} + +int fdt_create_with_flags(void *buf, int bufsize, uint32_t flags) +{ + const size_t hdrsize = FDT_ALIGN(sizeof(struct fdt_header), + sizeof(struct fdt_reserve_entry)); + void *fdt = buf; + + if (bufsize < hdrsize) + return -FDT_ERR_NOSPACE; + + if (flags & ~FDT_CREATE_FLAGS_ALL) + return -FDT_ERR_BADFLAGS; + + memset(buf, 0, bufsize); + + /* + * magic and last_comp_version keep intermediate state during the fdt + * creation process, which is replaced with the proper FDT format by + * fdt_finish(). + * + * flags should be accessed with sw_flags().
+ */ + fdt_set_magic(fdt, FDT_SW_MAGIC); + fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION); + fdt_set_last_comp_version(fdt, flags); + + fdt_set_totalsize(fdt, bufsize); + + fdt_set_off_mem_rsvmap(fdt, hdrsize); + fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt)); + fdt_set_off_dt_strings(fdt, 0); + + return 0; +} + +int fdt_create(void *buf, int bufsize) +{ + return fdt_create_with_flags(buf, bufsize, 0); +} + +int fdt_resize(void *fdt, void *buf, int bufsize) +{ + size_t headsize, tailsize; + char *oldtail, *newtail; + + FDT_SW_PROBE(fdt); + + headsize = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + tailsize = fdt_size_dt_strings(fdt); + + if ((headsize + tailsize) > fdt_totalsize(fdt)) + return -FDT_ERR_INTERNAL; + + if ((headsize + tailsize) > bufsize) + return -FDT_ERR_NOSPACE; + + oldtail = (char *)fdt + fdt_totalsize(fdt) - tailsize; + newtail = (char *)buf + bufsize - tailsize; + + /* Two cases to avoid clobbering data if the old and new + * buffers partially overlap */ + if (buf <= fdt) { + memmove(buf, fdt, headsize); + memmove(newtail, oldtail, tailsize); + } else { + memmove(newtail, oldtail, tailsize); + memmove(buf, fdt, headsize); + } + + fdt_set_totalsize(buf, bufsize); + if (fdt_off_dt_strings(buf)) + fdt_set_off_dt_strings(buf, bufsize); + + return 0; +} + +int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size) +{ + struct fdt_reserve_entry *re; + int offset; + + FDT_SW_PROBE_MEMRSV(fdt); + + offset = fdt_off_dt_struct(fdt); + if ((offset + sizeof(*re)) > fdt_totalsize(fdt)) + return -FDT_ERR_NOSPACE; + + re = (struct fdt_reserve_entry *)((char *)fdt + offset); + re->address = cpu_to_fdt64(addr); + re->size = cpu_to_fdt64(size); + + fdt_set_off_dt_struct(fdt, offset + sizeof(*re)); + + return 0; +} + +int fdt_finish_reservemap(void *fdt) +{ + int err = fdt_add_reservemap_entry(fdt, 0, 0); + + if (err) + return err; + + fdt_set_off_dt_strings(fdt, fdt_totalsize(fdt)); + return 0; +} + +int fdt_begin_node(void *fdt, const char *name) +{ + struct fdt_node_header *nh; + int namelen; + + FDT_SW_PROBE_STRUCT(fdt); + + namelen = strlen(name) + 1; + nh = fdt_grab_space_(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen)); + if (! nh) + return -FDT_ERR_NOSPACE; + + nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); + memcpy(nh->name, name, namelen); + return 0; +} + +int fdt_end_node(void *fdt) +{ + fdt32_t *en; + + FDT_SW_PROBE_STRUCT(fdt); + + en = fdt_grab_space_(fdt, FDT_TAGSIZE); + if (! 
en) + return -FDT_ERR_NOSPACE; + + *en = cpu_to_fdt32(FDT_END_NODE); + return 0; +} + +static int fdt_add_string_(void *fdt, const char *s) +{ + char *strtab = (char *)fdt + fdt_totalsize(fdt); + int strtabsize = fdt_size_dt_strings(fdt); + int len = strlen(s) + 1; + int struct_top, offset; + + offset = -strtabsize - len; + struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + if (fdt_totalsize(fdt) + offset < struct_top) + return 0; /* no more room :( */ + + memcpy(strtab + offset, s, len); + fdt_set_size_dt_strings(fdt, strtabsize + len); + return offset; +} + +/* Must only be used to roll back in case of error */ +static void fdt_del_last_string_(void *fdt, const char *s) +{ + int strtabsize = fdt_size_dt_strings(fdt); + int len = strlen(s) + 1; + + fdt_set_size_dt_strings(fdt, strtabsize - len); +} + +static int fdt_find_add_string_(void *fdt, const char *s, int *allocated) +{ + char *strtab = (char *)fdt + fdt_totalsize(fdt); + int strtabsize = fdt_size_dt_strings(fdt); + const char *p; + + *allocated = 0; + + p = fdt_find_string_(strtab - strtabsize, strtabsize, s); + if (p) + return p - strtab; + + *allocated = 1; + + return fdt_add_string_(fdt, s); +} + +int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp) +{ + struct fdt_property *prop; + int nameoff; + int allocated; + + FDT_SW_PROBE_STRUCT(fdt); + + /* String de-duplication can be slow, _NO_NAME_DEDUP skips it */ + if (sw_flags(fdt) & FDT_CREATE_FLAG_NO_NAME_DEDUP) { + allocated = 1; + nameoff = fdt_add_string_(fdt, name); + } else { + nameoff = fdt_find_add_string_(fdt, name, &allocated); + } + if (nameoff == 0) + return -FDT_ERR_NOSPACE; + + prop = fdt_grab_space_(fdt, sizeof(*prop) + FDT_TAGALIGN(len)); + if (! prop) { + if (allocated) + fdt_del_last_string_(fdt, name); + return -FDT_ERR_NOSPACE; + } + + prop->tag = cpu_to_fdt32(FDT_PROP); + prop->nameoff = cpu_to_fdt32(nameoff); + prop->len = cpu_to_fdt32(len); + *valp = prop->data; + return 0; +} + +int fdt_property(void *fdt, const char *name, const void *val, int len) +{ + void *ptr; + int ret; + + ret = fdt_property_placeholder(fdt, name, len, &ptr); + if (ret) + return ret; + memcpy(ptr, val, len); + return 0; +} + +int fdt_finish(void *fdt) +{ + char *p = (char *)fdt; + fdt32_t *end; + int oldstroffset, newstroffset; + uint32_t tag; + int offset, nextoffset; + + FDT_SW_PROBE_STRUCT(fdt); + + /* Add terminator */ + end = fdt_grab_space_(fdt, sizeof(*end)); + if (! end) + return -FDT_ERR_NOSPACE; + *end = cpu_to_fdt32(FDT_END); + + /* Relocate the string table */ + oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt); + newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt)); + fdt_set_off_dt_strings(fdt, newstroffset); + + /* Walk the structure, correcting string offsets */ + offset = 0; + while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) { + if (tag == FDT_PROP) { + struct fdt_property *prop = + fdt_offset_ptr_w_(fdt, offset); + int nameoff; + + nameoff = fdt32_to_cpu(prop->nameoff); + nameoff += fdt_size_dt_strings(fdt); + prop->nameoff = cpu_to_fdt32(nameoff); + } + offset = nextoffset; + } + if (nextoffset < 0) + return nextoffset; + + /* Finally, adjust the header */ + fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt)); + + /* And fix up fields that were keeping intermediate state. 
*/ + fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION); + fdt_set_magic(fdt, FDT_MAGIC); + + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_wip.c b/vendor/riscv-isa-sim/fdt/fdt_wip.c new file mode 100644 index 00000000..f64139e0 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_wip.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + */ +#include "libfdt_env.h" + +#include <fdt.h> +#include <libfdt.h> + +#include "libfdt_internal.h" + +int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset, + const char *name, int namelen, + uint32_t idx, const void *val, + int len) +{ + void *propval; + int proplen; + + propval = fdt_getprop_namelen_w(fdt, nodeoffset, name, namelen, + &proplen); + if (!propval) + return proplen; + + if (proplen < (len + idx)) + return -FDT_ERR_NOSPACE; + + memcpy((char *)propval + idx, val, len); + return 0; +} + +int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, + const void *val, int len) +{ + const void *propval; + int proplen; + + propval = fdt_getprop(fdt, nodeoffset, name, &proplen); + if (!propval) + return proplen; + + if (proplen != len) + return -FDT_ERR_NOSPACE; + + return fdt_setprop_inplace_namelen_partial(fdt, nodeoffset, name, + strlen(name), 0, + val, len); +} + +static void fdt_nop_region_(void *start, int len) +{ + fdt32_t *p; + + for (p = start; (char *)p < ((char *)start + len); p++) + *p = cpu_to_fdt32(FDT_NOP); +} + +int fdt_nop_property(void *fdt, int nodeoffset, const char *name) +{ + struct fdt_property *prop; + int len; + + prop = fdt_get_property_w(fdt, nodeoffset, name, &len); + if (!prop) + return len; + + fdt_nop_region_(prop, len + sizeof(*prop)); + + return 0; +} + +int fdt_node_end_offset_(void *fdt, int offset) +{ + int depth = 0; + + while ((offset >= 0) && (depth >= 0)) + offset = fdt_next_node(fdt, offset, &depth); + + return offset; +} + +int fdt_nop_node(void *fdt, int nodeoffset) +{ + int endoffset; + + endoffset = fdt_node_end_offset_(fdt, nodeoffset); + if (endoffset < 0) + return endoffset; + + fdt_nop_region_(fdt_offset_ptr_w(fdt, nodeoffset, 0), + endoffset - nodeoffset); + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/libfdt.h b/vendor/riscv-isa-sim/fdt/libfdt.h new file mode 100644 index 00000000..d2356cce --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/libfdt.h @@ -0,0 +1,2077 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +#ifndef LIBFDT_H +#define LIBFDT_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + */ + +#include <libfdt_env.h> +#include <fdt.h> + +#define FDT_FIRST_SUPPORTED_VERSION 0x02 +#define FDT_LAST_SUPPORTED_VERSION 0x11 + +/* Error codes: informative error codes */ +#define FDT_ERR_NOTFOUND 1 + /* FDT_ERR_NOTFOUND: The requested node or property does not exist */ +#define FDT_ERR_EXISTS 2 + /* FDT_ERR_EXISTS: Attempted to create a node or property which + * already exists */ +#define FDT_ERR_NOSPACE 3 + /* FDT_ERR_NOSPACE: Operation needed to expand the device + * tree, but its buffer did not have sufficient space to + * contain the expanded tree. Use fdt_open_into() to move the + * device tree to a buffer with more space. */ + +/* Error codes: codes for bad parameters */ +#define FDT_ERR_BADOFFSET 4 + /* FDT_ERR_BADOFFSET: Function was passed a structure block + * offset which is out-of-bounds, or which points to an + * unsuitable part of the structure for the operation.
*/ +#define FDT_ERR_BADPATH 5 + /* FDT_ERR_BADPATH: Function was passed a badly formatted path + * (e.g. missing a leading / for a function which requires an + * absolute path) */ +#define FDT_ERR_BADPHANDLE 6 + /* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle. + * This can be caused either by an invalid phandle property + * length, or the phandle value was either 0 or -1, which are + * not permitted. */ +#define FDT_ERR_BADSTATE 7 + /* FDT_ERR_BADSTATE: Function was passed an incomplete device + * tree created by the sequential-write functions, which is + * not sufficiently complete for the requested operation. */ + +/* Error codes: codes for bad device tree blobs */ +#define FDT_ERR_TRUNCATED 8 + /* FDT_ERR_TRUNCATED: FDT or a sub-block is improperly + * terminated (overflows, goes outside allowed bounds, or + * isn't properly terminated). */ +#define FDT_ERR_BADMAGIC 9 + /* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a + * device tree at all - it is missing the flattened device + * tree magic number. */ +#define FDT_ERR_BADVERSION 10 + /* FDT_ERR_BADVERSION: Given device tree has a version which + * can't be handled by the requested operation. For + * read-write functions, this may mean that fdt_open_into() is + * required to convert the tree to the expected version. */ +#define FDT_ERR_BADSTRUCTURE 11 + /* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt + * structure block or other serious error (e.g. misnested + * nodes, or subnodes preceding properties). */ +#define FDT_ERR_BADLAYOUT 12 + /* FDT_ERR_BADLAYOUT: For read-write functions, the given + * device tree has its sub-blocks in an order that the + * function can't handle (memory reserve map, then structure, + * then strings). Use fdt_open_into() to reorganize the tree + * into a form suitable for the read-write operations. */ + +/* "Can't happen" error indicating a bug in libfdt */ +#define FDT_ERR_INTERNAL 13 + /* FDT_ERR_INTERNAL: libfdt has failed an internal assertion. + * Should never be returned; if it is, it indicates a bug in + * libfdt itself. */ + +/* Errors in device tree content */ +#define FDT_ERR_BADNCELLS 14 + /* FDT_ERR_BADNCELLS: Device tree has a #address-cells, #size-cells + * or similar property with a bad format or value */ + +#define FDT_ERR_BADVALUE 15 + /* FDT_ERR_BADVALUE: Device tree has a property with an unexpected + * value. For example: a property expected to contain a string list + * is not NUL-terminated within the length of its value. */ + +#define FDT_ERR_BADOVERLAY 16 + /* FDT_ERR_BADOVERLAY: The device tree overlay, while + * correctly structured, cannot be applied due to some + * unexpected or missing value, property or node. */ + +#define FDT_ERR_NOPHANDLES 17 + /* FDT_ERR_NOPHANDLES: The device tree doesn't have any + * phandle available anymore without causing an overflow */ + +#define FDT_ERR_BADFLAGS 18 + /* FDT_ERR_BADFLAGS: The function was passed a flags field that + * contains invalid flags or an invalid combination of flags. */ + +#define FDT_ERR_MAX 18 + +/* constants */ +#define FDT_MAX_PHANDLE 0xfffffffe + /* Valid values for phandles range from 1 to 2^32-2.
*/ + +#ifdef __cplusplus +extern "C" { +#endif +/**********************************************************************/ +/* Low-level functions (you probably don't need these) */ +/**********************************************************************/ + +#ifndef SWIG /* This function is not useful in Python */ +const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen); +#endif +static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen) +{ + return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen); +} + +uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset); + +/* + * Alignment helpers: + * These helpers access words from a device tree blob. They're + * built to work even with unaligned pointers on platforms (like + * ARM) that don't like unaligned loads and stores + */ + +static inline uint32_t fdt32_ld(const fdt32_t *p) +{ + const uint8_t *bp = (const uint8_t *)p; + + return ((uint32_t)bp[0] << 24) + | ((uint32_t)bp[1] << 16) + | ((uint32_t)bp[2] << 8) + | bp[3]; +} + +static inline void fdt32_st(void *property, uint32_t value) +{ + uint8_t *bp = (uint8_t *)property; + + bp[0] = value >> 24; + bp[1] = (value >> 16) & 0xff; + bp[2] = (value >> 8) & 0xff; + bp[3] = value & 0xff; +} + +static inline uint64_t fdt64_ld(const fdt64_t *p) +{ + const uint8_t *bp = (const uint8_t *)p; + + return ((uint64_t)bp[0] << 56) + | ((uint64_t)bp[1] << 48) + | ((uint64_t)bp[2] << 40) + | ((uint64_t)bp[3] << 32) + | ((uint64_t)bp[4] << 24) + | ((uint64_t)bp[5] << 16) + | ((uint64_t)bp[6] << 8) + | bp[7]; +} + +static inline void fdt64_st(void *property, uint64_t value) +{ + uint8_t *bp = (uint8_t *)property; + + bp[0] = value >> 56; + bp[1] = (value >> 48) & 0xff; + bp[2] = (value >> 40) & 0xff; + bp[3] = (value >> 32) & 0xff; + bp[4] = (value >> 24) & 0xff; + bp[5] = (value >> 16) & 0xff; + bp[6] = (value >> 8) & 0xff; + bp[7] = value & 0xff; +} + +/**********************************************************************/ +/* Traversal functions */ +/**********************************************************************/ + +int fdt_next_node(const void *fdt, int offset, int *depth); + +/** + * fdt_first_subnode() - get offset of first direct subnode + * + * @fdt: FDT blob + * @offset: Offset of node to check + * @return offset of first subnode, or -FDT_ERR_NOTFOUND if there is none + */ +int fdt_first_subnode(const void *fdt, int offset); + +/** + * fdt_next_subnode() - get offset of next direct subnode + * + * After first calling fdt_first_subnode(), call this function repeatedly to + * get direct subnodes of a parent node. + * + * @fdt: FDT blob + * @offset: Offset of previous subnode + * @return offset of next subnode, or -FDT_ERR_NOTFOUND if there are no more + * subnodes + */ +int fdt_next_subnode(const void *fdt, int offset); + +/** + * fdt_for_each_subnode - iterate over all subnodes of a parent + * + * @node: child node (int, lvalue) + * @fdt: FDT blob (const void *) + * @parent: parent node (int) + * + * This is actually a wrapper around a for loop and would be used like so: + * + * fdt_for_each_subnode(node, fdt, parent) { + * Use node + * ... + * } + * + * if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) { + * Error handling + * } + * + * Note that this is implemented as a macro and @node is used as + * iterator in the loop. The parent variable can be constant or even a + * literal.
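+ *
+ * A slightly more concrete sketch (fdt and parent stand for a valid
+ * blob pointer and node offset; fdt_get_name() is declared later in
+ * this header), listing each subnode by name:
+ *
+ *	int node;
+ *	fdt_for_each_subnode(node, fdt, parent) {
+ *		const char *name = fdt_get_name(fdt, node, NULL);
+ *		Use name
+ *		...
+ *	}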
+ * + */ +#define fdt_for_each_subnode(node, fdt, parent) \ + for (node = fdt_first_subnode(fdt, parent); \ + node >= 0; \ + node = fdt_next_subnode(fdt, node)) + +/**********************************************************************/ +/* General functions */ +/**********************************************************************/ +#define fdt_get_header(fdt, field) \ + (fdt32_ld(&((const struct fdt_header *)(fdt))->field)) +#define fdt_magic(fdt) (fdt_get_header(fdt, magic)) +#define fdt_totalsize(fdt) (fdt_get_header(fdt, totalsize)) +#define fdt_off_dt_struct(fdt) (fdt_get_header(fdt, off_dt_struct)) +#define fdt_off_dt_strings(fdt) (fdt_get_header(fdt, off_dt_strings)) +#define fdt_off_mem_rsvmap(fdt) (fdt_get_header(fdt, off_mem_rsvmap)) +#define fdt_version(fdt) (fdt_get_header(fdt, version)) +#define fdt_last_comp_version(fdt) (fdt_get_header(fdt, last_comp_version)) +#define fdt_boot_cpuid_phys(fdt) (fdt_get_header(fdt, boot_cpuid_phys)) +#define fdt_size_dt_strings(fdt) (fdt_get_header(fdt, size_dt_strings)) +#define fdt_size_dt_struct(fdt) (fdt_get_header(fdt, size_dt_struct)) + +#define fdt_set_hdr_(name) \ + static inline void fdt_set_##name(void *fdt, uint32_t val) \ + { \ + struct fdt_header *fdth = (struct fdt_header *)fdt; \ + fdth->name = cpu_to_fdt32(val); \ + } +fdt_set_hdr_(magic); +fdt_set_hdr_(totalsize); +fdt_set_hdr_(off_dt_struct); +fdt_set_hdr_(off_dt_strings); +fdt_set_hdr_(off_mem_rsvmap); +fdt_set_hdr_(version); +fdt_set_hdr_(last_comp_version); +fdt_set_hdr_(boot_cpuid_phys); +fdt_set_hdr_(size_dt_strings); +fdt_set_hdr_(size_dt_struct); +#undef fdt_set_hdr_ + +/** + * fdt_header_size - return the size of the tree's header + * @fdt: pointer to a flattened device tree + */ +size_t fdt_header_size_(uint32_t version); +static inline size_t fdt_header_size(const void *fdt) +{ + return fdt_header_size_(fdt_version(fdt)); +} + +/** + * fdt_check_header - sanity check a device tree header + * + * @fdt: pointer to data which might be a flattened device tree + * + * fdt_check_header() checks that the given buffer contains what + * appears to be a flattened device tree, and that the header contains + * valid information (to the extent that can be determined from the + * header alone). + * + * returns: + * 0, if the buffer appears to contain a valid device tree + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_TRUNCATED, standard meanings, as above + */ +int fdt_check_header(const void *fdt); + +/** + * fdt_move - move a device tree around in memory + * @fdt: pointer to the device tree to move + * @buf: pointer to memory where the device tree is to be moved + * @bufsize: size of the memory space at buf + * + * fdt_move() relocates, if possible, the device tree blob located at + * fdt to the buffer at buf of size bufsize. The buffer may overlap + * with the existing device tree blob at fdt. Therefore, + * fdt_move(fdt, fdt, fdt_totalsize(fdt)) + * should always succeed.
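+ *
+ * A hedged sketch of relocating a blob into a larger, caller-provided
+ * buffer (names and the extra 1024 bytes are illustrative; error
+ * handling omitted). To also prepare the copy for read-write use,
+ * fdt_open_into() below is the usual choice instead:
+ *
+ *	int newsize = fdt_totalsize(fdt) + 1024;
+ *	void *newbuf = malloc(newsize);
+ *	if (newbuf)
+ *		err = fdt_move(fdt, newbuf, newsize);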
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +int fdt_move(const void *fdt, void *buf, int bufsize); + +/**********************************************************************/ +/* Read-only functions */ +/**********************************************************************/ + +int fdt_check_full(const void *fdt, size_t bufsize); + +/** + * fdt_get_string - retrieve a string from the strings block of a device tree + * @fdt: pointer to the device tree blob + * @stroffset: offset of the string within the strings block (native endian) + * @lenp: optional pointer to return the string's length + * + * fdt_get_string() retrieves a pointer to a single string from the + * strings block of the device tree blob at fdt, and optionally also + * returns the string's length in *lenp. + * + * returns: + * a pointer to the string, on success + * NULL, if stroffset is out of bounds, or doesn't point to a valid string + */ +const char *fdt_get_string(const void *fdt, int stroffset, int *lenp); + +/** + * fdt_string - retrieve a string from the strings block of a device tree + * @fdt: pointer to the device tree blob + * @stroffset: offset of the string within the strings block (native endian) + * + * fdt_string() retrieves a pointer to a single string from the + * strings block of the device tree blob at fdt. + * + * returns: + * a pointer to the string, on success + * NULL, if stroffset is out of bounds, or doesn't point to a valid string + */ +const char *fdt_string(const void *fdt, int stroffset); + +/** + * fdt_find_max_phandle - find and return the highest phandle in a tree + * @fdt: pointer to the device tree blob + * @phandle: return location for the highest phandle value found in the tree + * + * fdt_find_max_phandle() finds the highest phandle value in the given device + * tree. The value returned in @phandle is only valid if the function returns + * success. + * + * returns: + * 0 on success or a negative error code on failure + */ +int fdt_find_max_phandle(const void *fdt, uint32_t *phandle); + +/** + * fdt_get_max_phandle - retrieves the highest phandle in a tree + * @fdt: pointer to the device tree blob + * + * fdt_get_max_phandle retrieves the highest phandle in the given + * device tree. This will ignore badly formatted phandles, or phandles + * with a value of 0 or -1. + * + * This function is deprecated in favour of fdt_find_max_phandle(). + * + * returns: + * the highest phandle on success + * 0, if no phandle was found in the device tree + * -1, if an error occurred + */ +static inline uint32_t fdt_get_max_phandle(const void *fdt) +{ + uint32_t phandle; + int err; + + err = fdt_find_max_phandle(fdt, &phandle); + if (err < 0) + return (uint32_t)-1; + + return phandle; +} + +/** + * fdt_generate_phandle - return a new, unused phandle for a device tree blob + * @fdt: pointer to the device tree blob + * @phandle: return location for the new phandle + * + * Walks the device tree blob and looks for the highest phandle value. On + * success, the new, unused phandle value (one higher than the previously + * highest phandle value in the device tree blob) will be returned in the + * @phandle parameter. 
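+ *
+ * An illustrative sketch (node is a placeholder offset; error checks
+ * elided; fdt_setprop_u32() is a read-write helper declared later in
+ * this header):
+ *
+ *	uint32_t phandle;
+ *	if (fdt_generate_phandle(fdt, &phandle) == 0)
+ *		fdt_setprop_u32(fdt, node, "phandle", phandle);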
+ * + * Returns: + * 0 on success or a negative error-code on failure + */ +int fdt_generate_phandle(const void *fdt, uint32_t *phandle); + +/** + * fdt_num_mem_rsv - retrieve the number of memory reserve map entries + * @fdt: pointer to the device tree blob + * + * Returns the number of entries in the device tree blob's memory + * reservation map. This does not include the terminating 0,0 entry + * or any other (0,0) entries reserved for expansion. + * + * returns: + * the number of entries + */ +int fdt_num_mem_rsv(const void *fdt); + +/** + * fdt_get_mem_rsv - retrieve one memory reserve map entry + * @fdt: pointer to the device tree blob + * @address, @size: pointers to 64-bit variables + * + * On success, *address and *size will contain the address and size of + * the n-th reserve map entry from the device tree blob, in + * native-endian format. + * + * returns: + * 0, on success + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size); + +/** + * fdt_subnode_offset_namelen - find a subnode based on substring + * @fdt: pointer to the device tree blob + * @parentoffset: structure block offset of a node + * @name: name of the subnode to locate + * @namelen: number of characters of name to consider + * + * Identical to fdt_subnode_offset(), but only examine the first + * namelen characters of name for matching the subnode name. This is + * useful for finding subnodes based on a portion of a larger string, + * such as a full path. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_subnode_offset_namelen(const void *fdt, int parentoffset, + const char *name, int namelen); +#endif +/** + * fdt_subnode_offset - find a subnode of a given node + * @fdt: pointer to the device tree blob + * @parentoffset: structure block offset of a node + * @name: name of the subnode to locate + * + * fdt_subnode_offset() finds a subnode of the node at structure block + * offset parentoffset with the given name. name may include a unit + * address, in which case fdt_subnode_offset() will find the subnode + * with that unit address, or the unit address may be omitted, in + * which case fdt_subnode_offset() will find an arbitrary subnode + * whose name excluding unit address matches the given name. + * + * returns: + * structure block offset of the requested subnode (>=0), on success + * -FDT_ERR_NOTFOUND, if the requested subnode does not exist + * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name); + +/** + * fdt_path_offset_namelen - find a tree node by its full path + * @fdt: pointer to the device tree blob + * @path: full path of the node to locate + * @namelen: number of characters of path to consider + * + * Identical to fdt_path_offset(), but only consider the first namelen + * characters of path as the path name. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen); +#endif + +/** + * fdt_path_offset - find a tree node by its full path + * @fdt: pointer to the device tree blob + * @path: full path of the node to locate + * + * fdt_path_offset() finds a node of a given path in the device tree. 
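+ * For example (sketch; the path is purely illustrative):
+ *
+ *	int off = fdt_path_offset(fdt, "/soc/uart@10000000");
+ *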
+ * Each path component may omit the unit address portion, but the + * results of this are undefined if any such path component is + * ambiguous (that is if there are multiple nodes at the relevant + * level matching the given component, differentiated only by unit + * address). + * + * returns: + * structure block offset of the node with the requested path (>=0), on + * success + * -FDT_ERR_BADPATH, given path does not begin with '/' or is invalid + * -FDT_ERR_NOTFOUND, if the requested node does not exist + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_path_offset(const void *fdt, const char *path); + +/** + * fdt_get_name - retrieve the name of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of the starting node + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_get_name() retrieves the name (including unit address) of the + * device tree node at structure block offset nodeoffset. If lenp is + * non-NULL, the length of this name is also returned, in the integer + * pointed to by lenp. + * + * returns: + * pointer to the node's name, on success + * If lenp is non-NULL, *lenp contains the length of that name + * (>=0) + * NULL, on error + * if lenp is non-NULL *lenp contains an error code (<0): + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp); + +/** + * fdt_first_property_offset - find the offset of a node's first property + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of a node + * + * fdt_first_property_offset() finds the first property of the node at + * the given structure block offset. + * + * returns: + * structure block offset of the property (>=0), on success + * -FDT_ERR_NOTFOUND, if the requested node has no properties + * -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_first_property_offset(const void *fdt, int nodeoffset); + +/** + * fdt_next_property_offset - step through a node's properties + * @fdt: pointer to the device tree blob + * @offset: structure block offset of a property + * + * fdt_next_property_offset() finds the property immediately after the + * one at the given structure block offset. This will be a property + * of the same node as the given property. + * + * returns: + * structure block offset of the next property (>=0), on success + * -FDT_ERR_NOTFOUND, if the given property is the last in its node + * -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_PROP tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_next_property_offset(const void *fdt, int offset); + +/** + * fdt_for_each_property_offset - iterate over all properties of a node + * + * @property_offset: property offset (int, lvalue) + * @fdt: FDT blob (const void *) + * @node: node offset (int) + * + * This is actually a wrapper around a for loop and would be used like so: + * + * fdt_for_each_property_offset(property, fdt, node) { + * Use property + * ... 
+ * } + * + * if ((property < 0) && (property != -FDT_ERR_NOTFOUND)) { + * Error handling + * } + * + * Note that this is implemented as a macro and property is used as + * iterator in the loop. The node variable can be constant or even a + * literal. + */ +#define fdt_for_each_property_offset(property, fdt, node) \ + for (property = fdt_first_property_offset(fdt, node); \ + property >= 0; \ + property = fdt_next_property_offset(fdt, property)) + +/** + * fdt_get_property_by_offset - retrieve the property at a given offset + * @fdt: pointer to the device tree blob + * @offset: offset of the property to retrieve + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_get_property_by_offset() retrieves a pointer to the + * fdt_property structure within the device tree blob at the given + * offset. If lenp is non-NULL, the length of the property value is + * also returned, in the integer pointed to by lenp. + * + * Note that this code only works on device tree versions >= 16. fdt_getprop() + * works on all versions. + * + * returns: + * pointer to the structure representing the property + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +const struct fdt_property *fdt_get_property_by_offset(const void *fdt, + int offset, + int *lenp); + +/** + * fdt_get_property_namelen - find a property based on substring + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @namelen: number of characters of name to consider + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * Identical to fdt_get_property(), but only examine the first namelen + * characters of name for matching the property name. + */ +#ifndef SWIG /* Not available in Python */ +const struct fdt_property *fdt_get_property_namelen(const void *fdt, + int nodeoffset, + const char *name, + int namelen, int *lenp); +#endif + +/** + * fdt_get_property - find a given property in a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_get_property() retrieves a pointer to the fdt_property + * structure within the device tree blob corresponding to the property + * named 'name' of the node at offset nodeoffset. If lenp is + * non-NULL, the length of the property value is also returned, in the + * integer pointed to by lenp. 
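+ *
+ * A small sketch (nodeoffset is a placeholder offset and "compatible"
+ * merely an example property name):
+ *
+ *	int len;
+ *	const struct fdt_property *prop =
+ *		fdt_get_property(fdt, nodeoffset, "compatible", &len);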
+ * + * returns: + * pointer to the structure representing the property + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_NOTFOUND, node does not have named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset, + const char *name, int *lenp); +static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset, + const char *name, + int *lenp) +{ + return (struct fdt_property *)(uintptr_t) + fdt_get_property(fdt, nodeoffset, name, lenp); +} + +/** + * fdt_getprop_by_offset - retrieve the value of a property at a given offset + * @fdt: pointer to the device tree blob + * @offset: offset of the property to read + * @namep: pointer to a string variable (will be overwritten) or NULL + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_getprop_by_offset() retrieves a pointer to the value of the + * property at structure block offset 'offset' (this will be a pointer + * to within the device blob itself, not a copy of the value). If + * lenp is non-NULL, the length of the property value is also + * returned, in the integer pointed to by lenp. If namep is non-NULL, + * the property's name will also be returned in the char * pointed to + * by namep (this will be a pointer to within the device tree's string + * block, not a new copy of the name). + * + * returns: + * pointer to the property's value + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * if namep is non-NULL *namep contains a pointer to the property + * name. + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#ifndef SWIG /* This function is not useful in Python */ +const void *fdt_getprop_by_offset(const void *fdt, int offset, + const char **namep, int *lenp); +#endif + +/** + * fdt_getprop_namelen - get property value based on substring + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @namelen: number of characters of name to consider + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * Identical to fdt_getprop(), but only examine the first namelen + * characters of name for matching the property name.
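+ *
+ * Sketch of why namelen matters (spec is a hypothetical, longer,
+ * non-NUL-terminated specifier; only its first 3 characters, "reg",
+ * name the property):
+ *
+ *	const char *spec = "reg:64";
+ *	const void *val = fdt_getprop_namelen(fdt, nodeoffset, spec, 3, &len);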
+ */ +#ifndef SWIG /* Not available in Python */ +const void *fdt_getprop_namelen(const void *fdt, int nodeoffset, + const char *name, int namelen, int *lenp); +static inline void *fdt_getprop_namelen_w(void *fdt, int nodeoffset, + const char *name, int namelen, + int *lenp) +{ + return (void *)(uintptr_t)fdt_getprop_namelen(fdt, nodeoffset, name, + namelen, lenp); +} +#endif + +/** + * fdt_getprop - retrieve the value of a given property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_getprop() retrieves a pointer to the value of the property + * named 'name' of the node at offset nodeoffset (this will be a + * pointer to within the device blob itself, not a copy of the value). + * If lenp is non-NULL, the length of the property value is also + * returned, in the integer pointed to by lenp. + * + * returns: + * pointer to the property's value + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_NOTFOUND, node does not have named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +const void *fdt_getprop(const void *fdt, int nodeoffset, + const char *name, int *lenp); +static inline void *fdt_getprop_w(void *fdt, int nodeoffset, + const char *name, int *lenp) +{ + return (void *)(uintptr_t)fdt_getprop(fdt, nodeoffset, name, lenp); +} + +/** + * fdt_get_phandle - retrieve the phandle of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of the node + * + * fdt_get_phandle() retrieves the phandle of the device tree node at + * structure block offset nodeoffset. + * + * returns: + * the phandle of the node at nodeoffset, on success (!= 0, != -1) + * 0, if the node has no phandle, or another error occurs + */ +uint32_t fdt_get_phandle(const void *fdt, int nodeoffset); + +/** + * fdt_get_alias_namelen - get alias based on substring + * @fdt: pointer to the device tree blob + * @name: name of the alias to look up + * @namelen: number of characters of name to consider + * + * Identical to fdt_get_alias(), but only examine the first namelen + * characters of name for matching the alias name. + */ +#ifndef SWIG /* Not available in Python */ +const char *fdt_get_alias_namelen(const void *fdt, + const char *name, int namelen); +#endif + +/** + * fdt_get_alias - retrieve the path referenced by a given alias + * @fdt: pointer to the device tree blob + * @name: name of the alias to look up + * + * fdt_get_alias() retrieves the value of a given alias. That is, the + * value of the property named 'name' in the node /aliases.
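+ *
+ * A common idiom (sketch; the alias name "serial0" is illustrative)
+ * is to resolve an alias and then look the node up by the returned
+ * path:
+ *
+ *	const char *path = fdt_get_alias(fdt, "serial0");
+ *	if (path)
+ *		offset = fdt_path_offset(fdt, path);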
+ * + * returns: + * a pointer to the expansion of the alias named 'name', if it exists + * NULL, if the given alias or the /aliases node does not exist + */ +const char *fdt_get_alias(const void *fdt, const char *name); + +/** + * fdt_get_path - determine the full path of a node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose path to find + * @buf: character buffer to contain the returned path (will be overwritten) + * @buflen: size of the character buffer at buf + * + * fdt_get_path() computes the full path of the node at offset + * nodeoffset, and records that path in the buffer at buf. + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset. + * + * returns: + * 0, on success + * buf contains the absolute path of the node at + * nodeoffset, as a NUL-terminated string. + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_NOSPACE, the path of the given node is longer than (buflen-1) + * characters and will not fit in the given buffer. + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen); + +/** + * fdt_supernode_atdepth_offset - find a specific ancestor of a node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose parent to find + * @supernodedepth: depth of the ancestor to find + * @nodedepth: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_supernode_atdepth_offset() finds an ancestor of the given node + * at a specific depth from the root (where the root itself has depth + * 0, its immediate subnodes depth 1 and so forth). So + * fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL); + * will always return 0, the offset of the root node. If the node at + * nodeoffset has depth D, then: + * fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL); + * will return nodeoffset itself. + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset. + * + * returns: + * structure block offset of the node at node offset's ancestor + * of depth supernodedepth (>=0), on success + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of + * nodeoffset + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset, + int supernodedepth, int *nodedepth); + +/** + * fdt_node_depth - find the depth of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose parent to find + * + * fdt_node_depth() finds the depth of a given node. The root node + * has depth 0, its immediate subnodes depth 1 and so forth. + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset.
+ * + * returns: + * depth of the node at nodeoffset (>=0), on success + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_depth(const void *fdt, int nodeoffset); + +/** + * fdt_parent_offset - find the parent of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose parent to find + * + * fdt_parent_offset() locates the parent node of a given node (that + * is, it finds the offset of the node which contains the node at + * nodeoffset as a subnode). + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset, *twice*. + * + * returns: + * structure block offset of the parent of the node at nodeoffset + * (>=0), on success + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_parent_offset(const void *fdt, int nodeoffset); + +/** + * fdt_node_offset_by_prop_value - find nodes with a given property value + * @fdt: pointer to the device tree blob + * @startoffset: only find nodes after this offset + * @propname: property name to check + * @propval: property value to search for + * @proplen: length of the value in propval + * + * fdt_node_offset_by_prop_value() returns the offset of the first + * node after startoffset, which has a property named propname whose + * value is of length proplen and has value equal to propval; or if + * startoffset is -1, the very first such node in the tree. + * + * To iterate through all nodes matching the criterion, the following + * idiom can be used: + * offset = fdt_node_offset_by_prop_value(fdt, -1, propname, + * propval, proplen); + * while (offset != -FDT_ERR_NOTFOUND) { + * // other code here + * offset = fdt_node_offset_by_prop_value(fdt, offset, propname, + * propval, proplen); + * } + * + * Note the -1 in the first call to the function; if 0 is used here + * instead, the function will never locate the root node, even if it + * matches the criterion. + * + * returns: + * structure block offset of the located node (>= 0, >startoffset), + * on success + * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the + * tree after startoffset + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_offset_by_prop_value(const void *fdt, int startoffset, + const char *propname, + const void *propval, int proplen); + +/** + * fdt_node_offset_by_phandle - find the node with a given phandle + * @fdt: pointer to the device tree blob + * @phandle: phandle value + * + * fdt_node_offset_by_phandle() returns the offset of the node + * which has the given phandle value. If there is more than one node + * in the tree with the given phandle (an invalid tree), results are + * undefined.
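+ *
+ * Typical use (sketch; node and the property name are illustrative,
+ * and a real caller must also check the property's length):
+ *
+ *	const fdt32_t *ph = fdt_getprop(fdt, node, "interrupt-parent", NULL);
+ *	if (ph)
+ *		parent = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*ph));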
+ * + * returns: + * structure block offset of the located node (>= 0), on success + * -FDT_ERR_NOTFOUND, no node with that phandle exists + * -FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1) + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle); + +/** + * fdt_node_check_compatible: check a node's compatible property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @compatible: string to match against + * + * fdt_node_check_compatible() returns 0 if the given node contains a + * 'compatible' property with the given string as one of its elements; + * it returns non-zero otherwise, or on error. + * + * returns: + * 0, if the node has a 'compatible' property listing the given string + * 1, if the node has a 'compatible' property, but it does not list + * the given string + * -FDT_ERR_NOTFOUND, if the given node has no 'compatible' property + * -FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_check_compatible(const void *fdt, int nodeoffset, + const char *compatible); + +/** + * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value + * @fdt: pointer to the device tree blob + * @startoffset: only find nodes after this offset + * @compatible: 'compatible' string to match against + * + * fdt_node_offset_by_compatible() returns the offset of the first + * node after startoffset, which has a 'compatible' property which + * lists the given compatible string; or if startoffset is -1, the + * very first such node in the tree. + * + * To iterate through all nodes matching the criterion, the following + * idiom can be used: + * offset = fdt_node_offset_by_compatible(fdt, -1, compatible); + * while (offset != -FDT_ERR_NOTFOUND) { + * // other code here + * offset = fdt_node_offset_by_compatible(fdt, offset, compatible); + * } + * + * Note the -1 in the first call to the function; if 0 is used here + * instead, the function will never locate the root node, even if it + * matches the criterion. + * + * returns: + * structure block offset of the located node (>= 0, >startoffset), + * on success + * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the + * tree after startoffset + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_offset_by_compatible(const void *fdt, int startoffset, + const char *compatible); + +/** + * fdt_stringlist_contains - check a string list property for a string + * @strlist: Property containing a list of strings to check + * @listlen: Length of property + * @str: String to search for + * + * This is a utility function provided for convenience. The list contains + * one or more strings, each terminated by \0, as is found in a device tree + * "compatible" property.
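+ *
+ * For example (sketch; the node offset and the "ns16550a" string are
+ * illustrative), checking a node's "compatible" list by hand:
+ *
+ *	int len;
+ *	const char *list = fdt_getprop(fdt, node, "compatible", &len);
+ *	if (list && fdt_stringlist_contains(list, len, "ns16550a"))
+ *		...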
+ * + * @return: 1 if the string is found in the list, 0 not found, or invalid list + */ +int fdt_stringlist_contains(const char *strlist, int listlen, const char *str); + +/** + * fdt_stringlist_count - count the number of strings in a string list + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @property: name of the property containing the string list + * @return: + * the number of strings in the given property + * -FDT_ERR_BADVALUE if the property value is not NUL-terminated + * -FDT_ERR_NOTFOUND if the property does not exist + */ +int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property); + +/** + * fdt_stringlist_search - find a string in a string list and return its index + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @property: name of the property containing the string list + * @string: string to look up in the string list + * + * Note that it is possible for this function to succeed on property values + * that are not NUL-terminated. That's because the function will stop after + * finding the first occurrence of @string. This can for example happen with + * small-valued cell properties, such as #address-cells, when searching for + * the empty string. + * + * @return: + * the index of the string in the list of strings + * -FDT_ERR_BADVALUE if the property value is not NUL-terminated + * -FDT_ERR_NOTFOUND if the property does not exist or does not contain + * the given string + */ +int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property, + const char *string); + +/** + * fdt_stringlist_get() - obtain the string at a given index in a string list + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @property: name of the property containing the string list + * @index: index of the string to return + * @lenp: return location for the string length or an error code on failure + * + * Note that this will successfully extract strings from properties with + * non-NUL-terminated values. For example on small-valued cell properties + * this function will return the empty string. + * + * If non-NULL, the length of the string (on success) or a negative error-code + * (on failure) will be stored in the integer pointed to by lenp. + * + * @return: + * A pointer to the string at the given index in the string list or NULL on + * failure. On success the length of the string will be stored in the memory + * location pointed to by the lenp parameter, if non-NULL. On failure one of + * the following negative error codes will be returned in the lenp parameter + * (if non-NULL): + * -FDT_ERR_BADVALUE if the property value is not NUL-terminated + * -FDT_ERR_NOTFOUND if the property does not exist + */ +const char *fdt_stringlist_get(const void *fdt, int nodeoffset, + const char *property, int index, + int *lenp); + +/**********************************************************************/ +/* Read-only functions (addressing related) */ +/**********************************************************************/ + +/** + * FDT_MAX_NCELLS - maximum value for #address-cells and #size-cells + * + * This is the maximum value for #address-cells, #size-cells and + * similar properties that will be processed by libfdt. IEEE 1275 + * requires that OF implementations handle values up to 4. + * Implementations may support larger values, but in practice higher + * values aren't used.
+ */ +#define FDT_MAX_NCELLS 4 + +/** + * fdt_address_cells - retrieve address size for a bus represented in the tree + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to find the address size for + * + * When the node has a valid #address-cells property, returns its value. + * + * returns: + * 0 <= n < FDT_MAX_NCELLS, on success + * 2, if the node has no #address-cells property + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid + * #address-cells property + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_address_cells(const void *fdt, int nodeoffset); + +/** + * fdt_size_cells - retrieve address range size for a bus represented in the + * tree + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to find the address range size for + * + * When the node has a valid #size-cells property, returns its value. + * + * returns: + * 0 <= n < FDT_MAX_NCELLS, on success + * 1, if the node has no #size-cells property + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid + * #size-cells property + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_size_cells(const void *fdt, int nodeoffset); + + +/**********************************************************************/ +/* Write-in-place functions */ +/**********************************************************************/ + +/** + * fdt_setprop_inplace_namelen_partial - change a property's value, + * but not its size + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @namelen: number of characters of name to consider + * @idx: index of the property to change in the array + * @val: pointer to data to replace the property value with + * @len: length of the property value + * + * Identical to fdt_setprop_inplace(), but modifies the given property + * starting from the given index, and using only the first namelen + * characters of the name. It is useful when you want to manipulate only one + * value of an array and you have a string that doesn't end with \0. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset, + const char *name, int namelen, + uint32_t idx, const void *val, + int len); +#endif + +/** + * fdt_setprop_inplace - change a property's value, but not its size + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: pointer to data to replace the property value with + * @len: length of the property value + * + * fdt_setprop_inplace() replaces the value of a given property with + * the data in val, of length len. This function cannot change the + * size of a property, and so will only work if len is equal to the + * current length of the property. + * + * This function will alter only the bytes in the blob which contain + * the given property value, and will not alter or move any other part + * of the tree.
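+ *
+ * Sketch: overwriting an existing 4-byte property (the property must
+ * already exist with exactly this length; node and the property name
+ * are illustrative):
+ *
+ *	fdt32_t tmp = cpu_to_fdt32(100000000);
+ *	err = fdt_setprop_inplace(fdt, node, "clock-frequency",
+ *				  &tmp, sizeof(tmp));
+ *
+ * fdt_setprop_inplace_u32() below wraps exactly this pattern.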
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, if len is not equal to the property's current length + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#ifndef SWIG /* Not available in Python */ +int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, + const void *val, int len); +#endif + +/** + * fdt_setprop_inplace_u32 - change the value of a 32-bit integer property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 32-bit integer value to replace the property with + * + * fdt_setprop_inplace_u32() replaces the value of a given property + * with the 32-bit integer value in val, converting val to big-endian + * if necessary. This function cannot change the size of a property, + * and so will only work if the property already exists and has length + * 4. + * + * This function will alter only the bytes in the blob which contain + * the given property value, and will not alter or move any other part + * of the tree. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, if the property's length is not equal to 4 + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset, + const char *name, uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_inplace_u64 - change the value of a 64-bit integer property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 64-bit integer value to replace the property with + * + * fdt_setprop_inplace_u64() replaces the value of a given property + * with the 64-bit integer value in val, converting val to big-endian + * if necessary. This function cannot change the size of a property, + * and so will only work if the property already exists and has length + * 8. + * + * This function will alter only the bytes in the blob which contain + * the given property value, and will not alter or move any other part + * of the tree. 
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if the property's length is not equal to 8
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset,
+                                          const char *name, uint64_t val)
+{
+  fdt64_t tmp = cpu_to_fdt64(val);
+  return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_inplace_cell - change the value of a single-cell property
+ *
+ * This is an alternative name for fdt_setprop_inplace_u32()
+ */
+static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
+                                           const char *name, uint32_t val)
+{
+  return fdt_setprop_inplace_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_nop_property - replace a property with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to nop
+ * @name: name of the property to nop
+ *
+ * fdt_nop_property() will replace a given property's representation
+ * in the blob with FDT_NOP tags, effectively removing it from the
+ * tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the property, and will not alter or move any other part of the
+ * tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_nop_node - replace a node (subtree) with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_nop_node() will replace a given node's representation in the
+ * blob, including all its subnodes, if any, with FDT_NOP tags,
+ * effectively removing it from the tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the node and its properties and subnodes, and will not alter or
+ * move any other part of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_node(void *fdt, int nodeoffset);
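As a usage sketch of the write-in-place contract (not part of the vendored header; the node path and property name are assumptions), an update of an existing 4-byte cell that is guaranteed not to move any other bytes in the blob looks like this:

    /* Sketch: update an existing 32-bit property in place. */
    static int set_uart_clock(void *fdt, uint32_t hz)
    {
        int node = fdt_path_offset(fdt, "/soc/uart");  /* assumed path */
        if (node < 0)
            return node;                   /* e.g. -FDT_ERR_NOTFOUND */
        /* Fails with -FDT_ERR_NOSPACE unless the property already exists
         * with length 4; nothing else in the blob is touched. */
        return fdt_setprop_inplace_u32(fdt, node, "clock-frequency", hz);
    }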
+/**********************************************************************/
+/* Sequential write functions                                         */
+/**********************************************************************/
+
+/* fdt_create_with_flags flags */
+#define FDT_CREATE_FLAG_NO_NAME_DEDUP 0x1
+ /* FDT_CREATE_FLAG_NO_NAME_DEDUP: Do not try to de-duplicate property
+  * names in the fdt. This can result in faster creation times, but
+  * a larger fdt. */
+
+#define FDT_CREATE_FLAGS_ALL (FDT_CREATE_FLAG_NO_NAME_DEDUP)
+
+/**
+ * fdt_create_with_flags - begin creation of a new fdt
+ * @fdt: pointer to memory allocated where fdt will be created
+ * @bufsize: size of the memory space at fdt
+ * @flags: a valid combination of FDT_CREATE_FLAG_ flags, or 0.
+ *
+ * fdt_create_with_flags() begins the process of creating a new fdt with
+ * the sequential write interface.
+ *
+ * The fdt creation process must end with fdt_finish() to produce a valid fdt.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient for a minimal fdt
+ * -FDT_ERR_BADFLAGS, flags is not valid
+ */
+int fdt_create_with_flags(void *buf, int bufsize, uint32_t flags);
+
+/**
+ * fdt_create - begin creation of a new fdt
+ * @fdt: pointer to memory allocated where fdt will be created
+ * @bufsize: size of the memory space at fdt
+ *
+ * fdt_create() is equivalent to fdt_create_with_flags() with flags=0.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient for a minimal fdt
+ */
+int fdt_create(void *buf, int bufsize);
+
+int fdt_resize(void *fdt, void *buf, int bufsize);
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
+int fdt_finish_reservemap(void *fdt);
+int fdt_begin_node(void *fdt, const char *name);
+int fdt_property(void *fdt, const char *name, const void *val, int len);
+static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val)
+{
+  fdt32_t tmp = cpu_to_fdt32(val);
+  return fdt_property(fdt, name, &tmp, sizeof(tmp));
+}
+static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val)
+{
+  fdt64_t tmp = cpu_to_fdt64(val);
+  return fdt_property(fdt, name, &tmp, sizeof(tmp));
+}
+
+#ifndef SWIG /* Not available in Python */
+static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+{
+  return fdt_property_u32(fdt, name, val);
+}
+#endif
+
+/**
+ * fdt_property_placeholder - add a new property and return a ptr to its value
+ *
+ * @fdt: pointer to the device tree blob
+ * @name: name of property to add
+ * @len: length of property value in bytes
+ * @valp: returns a pointer to where the value should be placed
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_NOSPACE, standard meanings
+ */
+int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp);
+
+#define fdt_property_string(fdt, name, str) \
+ fdt_property(fdt, name, str, strlen(str)+1)
+int fdt_end_node(void *fdt);
+int fdt_finish(void *fdt);
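The sequential-write calls above must be issued in order: create, reserve-map entries, nodes and properties, then fdt_finish(). A minimal sketch (buffer size and property values are arbitrary, not from the vendored sources):

    static int build_minimal_fdt(void *buf, int bufsize)
    {
        int err = fdt_create(buf, bufsize);
        if (!err) err = fdt_finish_reservemap(buf);   /* no reservations */
        if (!err) err = fdt_begin_node(buf, "");      /* root node */
        if (!err) err = fdt_property_string(buf, "model", "example,board");
        if (!err) err = fdt_property_u32(buf, "#address-cells", 2);
        if (!err) err = fdt_end_node(buf);
        if (!err) err = fdt_finish(buf);  /* blob is valid only after this */
        return err;
    }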
+/**********************************************************************/
+/* Read-write functions                                               */
+/**********************************************************************/
+
+int fdt_create_empty_tree(void *buf, int bufsize);
+int fdt_open_into(const void *fdt, void *buf, int bufsize);
+int fdt_pack(void *fdt);
+
+/**
+ * fdt_add_mem_rsv - add one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @address, @size: 64-bit values (native endian)
+ *
+ * Adds a reserve map entry to the given blob reserving a region at
+ * address address of length size.
+ *
+ * This function will insert data into the reserve map and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new reservation entry
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size);
+
+/**
+ * fdt_del_mem_rsv - remove a memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: entry to remove
+ *
+ * fdt_del_mem_rsv() removes the n-th memory reserve map entry from
+ * the blob.
+ *
+ * This function will delete data from the reservation table and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, there is no entry of the given index (i.e. there
+ * are fewer than n+1 reserve map entries)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_mem_rsv(void *fdt, int n);
+
+/**
+ * fdt_set_name - change the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ * @name: name to give the node
+ *
+ * fdt_set_name() replaces the name (including unit address, if any)
+ * of the given node with the given string. NOTE: this function can't
+ * efficiently check if the new name is unique amongst the given
+ * node's siblings; results are undefined if this function is invoked
+ * with a name equal to one of the given node's siblings.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob
+ * to contain the new name
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_set_name(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_setprop - create or change a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to set the property value to
+ * @len: length of the property value
+ *
+ * fdt_setprop() sets the value of the named property in the given
+ * node to the given value and length, creating the property if it
+ * does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+                const void *val, int len);
+
+/**
+ * fdt_setprop_placeholder - allocate space for a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @len: length of the property value
+ * @prop_data: return pointer to property data
+ *
+ * fdt_setprop_placeholder() allocates the named property in the given node.
+ * If the property exists it is resized. In either case a pointer to the
+ * property data is returned.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name, + int len, void **prop_data); + +/** + * fdt_setprop_u32 - set a property to a 32-bit integer + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 32-bit integer value for the property (native endian) + * + * fdt_setprop_u32() sets the value of the named property in the given + * node to the given 32-bit integer value (converting to big-endian if + * necessary), or creates a new property with that value if it does + * not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name, + uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_u64 - set a property to a 64-bit integer + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 64-bit integer value for the property (native endian) + * + * fdt_setprop_u64() sets the value of the named property in the given + * node to the given 64-bit integer value (converting to big-endian if + * necessary), or creates a new property with that value if it does + * not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. 
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name, + uint64_t val) +{ + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_cell - set a property to a single cell value + * + * This is an alternative name for fdt_setprop_u32() + */ +static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name, + uint32_t val) +{ + return fdt_setprop_u32(fdt, nodeoffset, name, val); +} + +/** + * fdt_setprop_string - set a property to a string value + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @str: string value for the property + * + * fdt_setprop_string() sets the value of the named property in the + * given node to the given string value (using the length of the + * string to determine the new length of the property), or creates a + * new property with that value if it does not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#define fdt_setprop_string(fdt, nodeoffset, name, str) \ + fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1) + + +/** + * fdt_setprop_empty - set a property to an empty value + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * + * fdt_setprop_empty() sets the value of the named property in the + * given node to an empty (zero length) value, or creates a new empty + * property if it does not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. 
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#define fdt_setprop_empty(fdt, nodeoffset, name) \ + fdt_setprop((fdt), (nodeoffset), (name), NULL, 0) + +/** + * fdt_appendprop - append to or create a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to append to + * @val: pointer to data to append to the property value + * @len: length of the data to append to the property value + * + * fdt_appendprop() appends the value to the named property in the + * given node, creating the property if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_appendprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len); + +/** + * fdt_appendprop_u32 - append a 32-bit integer value to a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 32-bit integer value to append to the property (native endian) + * + * fdt_appendprop_u32() appends the given 32-bit integer value + * (converting to big-endian if necessary) to the value of the named + * property in the given node, or creates a new property with that + * value if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_appendprop_u32(void *fdt, int nodeoffset, + const char *name, uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_appendprop_u64 - append a 64-bit integer value to a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 64-bit integer value to append to the property (native endian) + * + * fdt_appendprop_u64() appends the given 64-bit integer value + * (converting to big-endian if necessary) to the value of the named + * property in the given node, or creates a new property with that + * value if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. 
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u64(void *fdt, int nodeoffset,
+                                     const char *name, uint64_t val)
+{
+  fdt64_t tmp = cpu_to_fdt64(val);
+  return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_appendprop_cell - append a single cell value to a property
+ *
+ * This is an alternative name for fdt_appendprop_u32()
+ */
+static inline int fdt_appendprop_cell(void *fdt, int nodeoffset,
+                                      const char *name, uint32_t val)
+{
+  return fdt_appendprop_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_appendprop_string - append a string to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value to append to the property
+ *
+ * fdt_appendprop_string() appends the given string to the value of
+ * the named property in the given node, or creates a new property
+ * with that value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_appendprop_string(fdt, nodeoffset, name, str) \
+ fdt_appendprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+/**
+ * fdt_appendprop_addrrange - append an address range property
+ * @fdt: pointer to the device tree blob
+ * @parent: offset of the parent node
+ * @nodeoffset: offset of the node to add a property at
+ * @name: name of property
+ * @addr: start address of a given range
+ * @size: size of a given range
+ *
+ * fdt_appendprop_addrrange() appends an address range value (start
+ * address and size) to the value of the named property in the given
+ * node, or creates a new property with that value if it does not
+ * already exist.
+ * If "name" is not specified, a default "reg" is used.
+ * Cell sizes are determined by parent's #address-cells and #size-cells.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ * #address-cells property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADVALUE, addr or size does not fit in the respective cell size
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain a new property
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_appendprop_addrrange(void *fdt, int parent, int nodeoffset,
+                             const char *name, uint64_t addr, uint64_t size);
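Because each appended string keeps its terminating NUL, fdt_appendprop_string() is the natural way to build device-tree string lists. A short sketch (the node offset and compatible strings are assumptions, not vendored content):

    /* Builds: compatible = "vendor,board-v2", "vendor,board"; */
    static int set_compatible(void *fdt, int node)
    {
        int err = fdt_setprop_string(fdt, node, "compatible",
                                     "vendor,board-v2");
        if (!err)
            err = fdt_appendprop_string(fdt, node, "compatible",
                                        "vendor,board");
        return err;
    }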
+
+/**
+ * fdt_delprop - delete a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to delete
+ * @name: name of the property to delete
+ *
+ * fdt_delprop() will delete the given property.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_delprop(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_add_subnode_namelen - creates a new node based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to create
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_add_subnode(), but uses only the first namelen
+ * characters of name as the name of the new node. This is useful for
+ * creating subnodes based on a portion of a larger string, such as a
+ * full path.
+ */
+#ifndef SWIG /* Not available in Python */
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+                            const char *name, int namelen);
+#endif
+
+/**
+ * fdt_add_subnode - creates a new node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to create
+ *
+ * fdt_add_subnode() creates a new node as a subnode of the node at
+ * structure block offset parentoffset, with the given name (which
+ * should include the unit address, if any).
+ *
+ * This function will insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * structure block offset of the created subnode (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE
+ * tag
+ * -FDT_ERR_EXISTS, if the node at parentoffset already has a subnode of
+ * the given name
+ * -FDT_ERR_NOSPACE, if there is insufficient free space in the
+ * blob to contain the new node
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name);
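A common calling pattern is to treat -FDT_ERR_EXISTS as success and reuse the existing node; a sketch (the /chosen node and bootargs value are assumptions):

    static int set_bootargs(void *fdt, const char *args)
    {
        int chosen = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
                                     "chosen");
        if (chosen == -FDT_ERR_EXISTS)
            chosen = fdt_path_offset(fdt, "/chosen");
        if (chosen < 0)
            return chosen;
        return fdt_setprop_string(fdt, chosen, "bootargs", args);
    }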
+
+/**
+ * fdt_del_node - delete a node (subtree)
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to delete
+ *
+ * fdt_del_node() will remove the given node, including all its
+ * subnodes if any, from the blob.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_node(void *fdt, int nodeoffset);
+
+/**
+ * fdt_overlay_apply - Applies a DT overlay on a base DT
+ * @fdt: pointer to the base device tree blob
+ * @fdto: pointer to the device tree overlay blob
+ *
+ * fdt_overlay_apply() will apply the given device tree overlay on the
+ * given base device tree.
+ *
+ * Expect the base device tree to be modified, even if the function
+ * returns an error.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there's not enough space in the base device tree
+ * -FDT_ERR_NOTFOUND, the overlay points to some nonexistent nodes or
+ * properties in the base DT
+ * -FDT_ERR_BADPHANDLE,
+ * -FDT_ERR_BADOVERLAY,
+ * -FDT_ERR_NOPHANDLES,
+ * -FDT_ERR_INTERNAL,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADOFFSET,
+ * -FDT_ERR_BADPATH,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_overlay_apply(void *fdt, void *fdto);
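Since the base blob may be left half-modified on failure, callers typically apply overlays to a scratch copy; a sketch under that assumption (caller-provided buffers, sizes assumed to fit):

    static int apply_overlay_safely(const void *base, void *overlay,
                                    void *scratch, int scratch_size)
    {
        int err = fdt_open_into(base, scratch, scratch_size); /* work on a copy */
        if (err)
            return err;
        err = fdt_overlay_apply(scratch, overlay);
        if (err)
            return err;  /* scratch (and overlay) may be clobbered; base is untouched */
        return fdt_pack(scratch);  /* optionally shrink before use */
    }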
+
+/**********************************************************************/
+/* Debugging / informational functions                                */
+/**********************************************************************/
+
+const char *fdt_strerror(int errval);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LIBFDT_H */
diff --git a/vendor/riscv-isa-sim/fdt/libfdt_env.h b/vendor/riscv-isa-sim/fdt/libfdt_env.h
new file mode 100644
index 00000000..44bd12a0
--- /dev/null
+++ b/vendor/riscv-isa-sim/fdt/libfdt_env.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+#ifndef LIBFDT_ENV_H
+#define LIBFDT_ENV_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef __CHECKER__
+#define FDT_FORCE __attribute__((force))
+#define FDT_BITWISE __attribute__((bitwise))
+#else
+#define FDT_FORCE
+#define FDT_BITWISE
+#endif
+
+typedef uint16_t FDT_BITWISE fdt16_t;
+typedef uint32_t FDT_BITWISE fdt32_t;
+typedef uint64_t FDT_BITWISE fdt64_t;
+
+#define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n])
+#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1))
+#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \
+                         (EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3))
+#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \
+                         (EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \
+                         (EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \
+                         (EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7))
+
+static inline uint16_t fdt16_to_cpu(fdt16_t x)
+{
+  return (FDT_FORCE uint16_t)CPU_TO_FDT16(x);
+}
+static inline fdt16_t cpu_to_fdt16(uint16_t x)
+{
+  return (FDT_FORCE fdt16_t)CPU_TO_FDT16(x);
+}
+
+static inline uint32_t fdt32_to_cpu(fdt32_t x)
+{
+  return (FDT_FORCE uint32_t)CPU_TO_FDT32(x);
+}
+static inline fdt32_t cpu_to_fdt32(uint32_t x)
+{
+  return (FDT_FORCE fdt32_t)CPU_TO_FDT32(x);
+}
+
+static inline uint64_t fdt64_to_cpu(fdt64_t x)
+{
+  return (FDT_FORCE uint64_t)CPU_TO_FDT64(x);
+}
+static inline fdt64_t cpu_to_fdt64(uint64_t x)
+{
+  return (FDT_FORCE fdt64_t)CPU_TO_FDT64(x);
+}
+#undef CPU_TO_FDT64
+#undef CPU_TO_FDT32
+#undef CPU_TO_FDT16
+#undef EXTRACT_BYTE
+
+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+
+/* strnlen() is not available on Mac OS < 10.7 */
+# if !defined(MAC_OS_X_VERSION_10_7) || (MAC_OS_X_VERSION_MAX_ALLOWED < \
+     MAC_OS_X_VERSION_10_7)
+
+#define strnlen fdt_strnlen
+
+/*
+ * fdt_strnlen: returns the length of a string, or max_count, whichever
+ * is smaller.
+ * Input 1 string: the string whose size is to be determined
+ * Input 2 max_count: the maximum value returned by this function
+ * Output: length of the string or max_count (the smaller of the two)
+ */
+static inline size_t fdt_strnlen(const char *string, size_t max_count)
+{
+  const char *p = memchr(string, 0, max_count);
+  return p ? p - string : max_count;
+}
+
+#endif /* !defined(MAC_OS_X_VERSION_10_7) || (MAC_OS_X_VERSION_MAX_ALLOWED <
+          MAC_OS_X_VERSION_10_7) */
+
+#endif /* __APPLE__ */
+
+#endif /* LIBFDT_ENV_H */
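These helpers are how property cells (always big-endian in the blob) are read and written portably. A small sketch using fdt_getprop(), which libfdt declares elsewhere in libfdt.h (the property name here is an assumption):

    static uint32_t read_u32_prop(const void *fdt, int node)
    {
        const fdt32_t *cell =
            (const fdt32_t *)fdt_getprop(fdt, node, "clock-frequency", NULL);
        /* blob order (big-endian) -> host order; 0 if the property is absent */
        return cell ? fdt32_to_cpu(*cell) : 0;
    }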
diff --git a/vendor/riscv-isa-sim/fdt/libfdt_internal.h b/vendor/riscv-isa-sim/fdt/libfdt_internal.h
new file mode 100644
index 00000000..741eeb31
--- /dev/null
+++ b/vendor/riscv-isa-sim/fdt/libfdt_internal.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+#ifndef LIBFDT_INTERNAL_H
+#define LIBFDT_INTERNAL_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ */
+#include <fdt.h>
+
+#define FDT_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define FDT_TAGALIGN(x) (FDT_ALIGN((x), FDT_TAGSIZE))
+
+int fdt_ro_probe_(const void *fdt);
+#define FDT_RO_PROBE(fdt) \
+ { \
+   int totalsize_; \
+   if ((totalsize_ = fdt_ro_probe_(fdt)) < 0) \
+     return totalsize_; \
+ }
+
+int fdt_check_node_offset_(const void *fdt, int offset);
+int fdt_check_prop_offset_(const void *fdt, int offset);
+const char *fdt_find_string_(const char *strtab, int tabsize, const char *s);
+int fdt_node_end_offset_(void *fdt, int nodeoffset);
+
+static inline const void *fdt_offset_ptr_(const void *fdt, int offset)
+{
+  return (const char *)fdt + fdt_off_dt_struct(fdt) + offset;
+}
+
+static inline void *fdt_offset_ptr_w_(void *fdt, int offset)
+{
+  return (void *)(uintptr_t)fdt_offset_ptr_(fdt, offset);
+}
+
+static inline const struct fdt_reserve_entry *fdt_mem_rsv_(const void *fdt, int n)
+{
+  const struct fdt_reserve_entry *rsv_table =
+    (const struct fdt_reserve_entry *)
+    ((const char *)fdt + fdt_off_mem_rsvmap(fdt));
+
+  return rsv_table + n;
+}
+static inline struct fdt_reserve_entry *fdt_mem_rsv_w_(void *fdt, int n)
+{
+  return (void *)(uintptr_t)fdt_mem_rsv_(fdt, n);
+}
+
+#define FDT_SW_MAGIC (~FDT_MAGIC)
+
+#endif /* LIBFDT_INTERNAL_H */
diff --git a/vendor/riscv-isa-sim/fesvr/byteorder.h b/vendor/riscv-isa-sim/fesvr/byteorder.h
new file mode 100644
index 00000000..2b1dbf98
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/byteorder.h
@@ -0,0 +1,94 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_BYTEORDER_H
+#define _RISCV_BYTEORDER_H
+
+#include "config.h"
+#include <stdint.h>
+
+static inline uint8_t swap(uint8_t n) { return n; }
+static inline uint16_t swap(uint16_t n) { return (n >> 8) | (n << 8); }
+static inline uint32_t swap(uint32_t n) { return (swap(uint16_t(n)) << 16) | swap(uint16_t(n >> 16)); }
+static inline uint64_t swap(uint64_t n) { return (uint64_t(swap(uint32_t(n))) << 32) | swap(uint32_t(n >> 32)); }
+static inline int8_t swap(int8_t n) { return n; }
+static inline int16_t swap(int16_t n) { return int16_t(swap(uint16_t(n))); }
+static inline int32_t swap(int32_t n) { return int32_t(swap(uint32_t(n))); }
+static inline int64_t swap(int64_t n) { return int64_t(swap(uint64_t(n))); }
+
+#ifdef WORDS_BIGENDIAN
+template <typename T> static inline T from_be(T n) { return n; }
+template <typename T> static inline T to_be(T n) { return n; }
+template <typename T> static inline T from_le(T n) { return swap(n); }
+template <typename T> static inline T to_le(T n) { return swap(n); }
+#else
+template <typename T> static inline T from_le(T n) { return n; }
+template <typename T> static inline T to_le(T n) { return n; }
+template <typename T> static inline T from_be(T n) { return swap(n); }
+template <typename T> static inline T to_be(T n) { return swap(n); }
+#endif
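The overloads above give the from_/to_ templates a single generic spelling whatever the host endianness; a quick smoke test of the intended behavior (values arbitrary):

    #include <cassert>

    static void byteorder_smoke_test()
    {
        uint32_t n = 0x12345678;
        uint32_t wire = to_be(n);            // big-endian bytes on any host
        assert(from_be(wire) == n);          // round-trips everywhere
        assert(swap(uint16_t(0x1234)) == 0x3412);  // byte swap, not a no-op
    }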
+
+// Wrapper to mark a value as target endian, to guide conversion code
+
+template <typename T> class base_endian {
+
+ protected:
+  T value;
+
+  base_endian(T n) : value(n) {}
+
+ public:
+  // Setting to and testing against zero never needs swapping
+  base_endian() : value(0) {}
+  bool operator!() { return !value; }
+
+  // Bitwise logic operations can be performed without swapping
+  base_endian& operator|=(const base_endian& rhs) { value |= rhs.value; return *this; }
+  base_endian& operator&=(const base_endian& rhs) { value &= rhs.value; return *this; }
+  base_endian& operator^=(const base_endian& rhs) { value ^= rhs.value; return *this; }
+
+  inline T from_be() { return ::from_be(value); }
+  inline T from_le() { return ::from_le(value); }
+};
+
+template <typename T> class target_endian : public base_endian<T> {
+ protected:
+  target_endian(T n) : base_endian<T>(n) {}
+
+ public:
+  target_endian() {}
+
+  static inline target_endian to_be(T n) { return target_endian(::to_be(n)); }
+  static inline target_endian to_le(T n) { return target_endian(::to_le(n)); }
+
+  // Useful values over which swapping is identity
+  static const target_endian zero;
+  static const target_endian all_ones;
+};
+
+template <typename T> const target_endian<T> target_endian<T>::zero = target_endian(T(0));
+template <typename T> const target_endian<T> target_endian<T>::all_ones = target_endian(~T(0));
+
+
+// Specializations with implicit conversions (no swap information needed)
+
+template<> class target_endian<uint8_t> : public base_endian<uint8_t> {
+ public:
+  target_endian() {}
+  target_endian(uint8_t n) : base_endian<uint8_t>(n) {}
+  operator uint8_t() { return value; }
+
+  static inline target_endian to_be(uint8_t n) { return target_endian(n); }
+  static inline target_endian to_le(uint8_t n) { return target_endian(n); }
+};
+
+template<> class target_endian<int8_t> : public base_endian<int8_t> {
+ public:
+  target_endian() {}
+  target_endian(int8_t n) : base_endian<int8_t>(n) {}
+  operator int8_t() { return value; }
+
+  static inline target_endian to_be(int8_t n) { return target_endian(n); }
+  static inline target_endian to_le(int8_t n) { return target_endian(n); }
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/fesvr/context.cc b/vendor/riscv-isa-sim/fesvr/context.cc
new file mode 100644
index 00000000..ca738137
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/context.cc
@@ -0,0 +1,115 @@
+#include "context.h"
+#include <cassert>
+#include <cstdlib>
+
+static __thread context_t* cur;
+
+context_t::context_t()
+  : creator(NULL), func(NULL), arg(NULL),
+#ifndef USE_UCONTEXT
+    mutex(PTHREAD_MUTEX_INITIALIZER),
+    cond(PTHREAD_COND_INITIALIZER), flag(0)
+#else
+    context(new ucontext_t)
+#endif
+{
+}
+
+#ifdef USE_UCONTEXT
+#ifndef GLIBC_64BIT_PTR_BUG
+void context_t::wrapper(context_t* ctx)
+{
+#else
+void context_t::wrapper(unsigned int hi, unsigned int lo)
+{
+  context_t* ctx = reinterpret_cast<context_t*>(
+      static_cast<unsigned long>(lo) | (static_cast<unsigned long>(hi) << 32));
+#endif
+  ctx->creator->switch_to();
+  ctx->func(ctx->arg);
+}
+#else
+void* context_t::wrapper(void* a)
+{
+  context_t* ctx = static_cast<context_t*>(a);
+  cur = ctx;
+  ctx->creator->switch_to();
+
+  ctx->func(ctx->arg);
+  return NULL;
+}
+#endif
+
+void context_t::init(void (*f)(void*), void* a)
+{
+  func = f;
+  arg = a;
+  creator = current();
+
+#ifdef USE_UCONTEXT
+  getcontext(context.get());
+  context->uc_link = creator->context.get();
+  context->uc_stack.ss_size = 64*1024;
+  context->uc_stack.ss_sp = new void*[context->uc_stack.ss_size/sizeof(void*)];
+#ifndef GLIBC_64BIT_PTR_BUG
+  makecontext(context.get(), (void(*)(void))&context_t::wrapper, 1, this);
+#else
+  unsigned int hi(reinterpret_cast<unsigned long>(this) >> 32);
+  unsigned int lo(reinterpret_cast<unsigned long>(this));
+  makecontext(context.get(), (void(*)(void))&context_t::wrapper, 2, hi, lo);
+#endif
+  switch_to();
+#else
+  assert(flag == 0);
+
+  pthread_mutex_lock(&creator->mutex);
+  creator->flag = 0;
+  if (pthread_create(&thread, NULL, &context_t::wrapper, this) != 0)
+    abort();
+  pthread_detach(thread);
+  while (!creator->flag)
+    pthread_cond_wait(&creator->cond, &creator->mutex);
+  pthread_mutex_unlock(&creator->mutex);
+#endif
+}
+
+context_t::~context_t()
+{
+  assert(this != cur);
+}
+
+void context_t::switch_to()
+{
+  assert(this != cur);
+#ifdef USE_UCONTEXT
+  context_t* prev = cur;
+  cur = this;
+  if (swapcontext(prev->context.get(), context.get()) != 0)
+    abort();
+#else
+  cur->flag = 0;
+  this->flag = 1;
+  pthread_mutex_lock(&this->mutex);
+  pthread_cond_signal(&this->cond);
+  pthread_mutex_unlock(&this->mutex);
+  pthread_mutex_lock(&cur->mutex);
+  while (!cur->flag)
+    pthread_cond_wait(&cur->cond, &cur->mutex);
+  pthread_mutex_unlock(&cur->mutex);
+#endif
+}
+
+context_t* context_t::current()
+{
+  if (cur == NULL)
+  {
+    cur = new context_t;
+#ifdef USE_UCONTEXT
+    getcontext(cur->context.get());
+#else
+    cur->thread = pthread_self();
+    cur->flag = 1;
+#endif
+  }
+  return cur;
+}
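This is the coroutine-style primitive fesvr uses to ping-pong between a host loop and a target simulation (dtm.cc switches contexts the same way). A standalone sketch of the handshake, with the yields spelled out (the body, arg pointer and printfs are illustrative, not from the vendored sources):

    #include "context.h"
    #include <cstdio>

    static void worker_body(void* arg)
    {
        context_t* host = static_cast<context_t*>(arg);
        std::printf("worker: step 1\n");
        host->switch_to();            // yield back to the creator
        std::printf("worker: step 2\n");
        host->switch_to();            // park here; never falls off the end
    }

    int main()
    {
        context_t* host = context_t::current();  // wrap the calling thread
        context_t worker;
        worker.init(worker_body, host);  // parks worker just before step 1
        worker.switch_to();              // prints "worker: step 1"
        worker.switch_to();              // prints "worker: step 2"
        return 0;
    }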
diff --git a/vendor/riscv-isa-sim/fesvr/context.h b/vendor/riscv-isa-sim/fesvr/context.h
new file mode 100644
index 00000000..18bf50ef
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/context.h
@@ -0,0 +1,54 @@
+#ifndef _HTIF_CONTEXT_H
+#define _HTIF_CONTEXT_H
+
+// A replacement for ucontext.h, which is sadly deprecated.
+
+#include <pthread.h>
+
+#if defined(__GLIBC__)
+# undef USE_UCONTEXT
+# define USE_UCONTEXT
+# include <ucontext.h>
+# include <memory>
+#include <limits.h>
+
+#if (ULONG_MAX > UINT_MAX) // 64-bit systems only
+#if (100*GLIB_MAJOR_VERSION+GLIB_MINOR_VERSION < 208)
+#define GLIBC_64BIT_PTR_BUG
+static_assert (sizeof(unsigned int) == 4, "uint size doesn't match expected 32bit");
+static_assert (sizeof(unsigned long) == 8, "ulong size doesn't match expected 64bit");
+static_assert (sizeof(void*) == 8, "ptr size doesn't match expected 64bit");
+#endif
+#endif /* ULONG_MAX > UINT_MAX */
+
+#endif
+
+class context_t
+{
+ public:
+  context_t();
+  ~context_t();
+  void init(void (*func)(void*), void* arg);
+  void switch_to();
+  static context_t* current();
+ private:
+  context_t* creator;
+  void (*func)(void*);
+  void* arg;
+#ifdef USE_UCONTEXT
+  std::unique_ptr<ucontext_t> context;
+#ifndef GLIBC_64BIT_PTR_BUG
+  static void wrapper(context_t*);
+#else
+  static void wrapper(unsigned int, unsigned int);
+#endif
+#else
+  pthread_t thread;
+  pthread_mutex_t mutex;
+  pthread_cond_t cond;
+  volatile int flag;
+  static void* wrapper(void*);
+#endif
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/fesvr/debug_defines.h b/vendor/riscv-isa-sim/fesvr/debug_defines.h
new file mode 100644
index 00000000..e5f92910
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/debug_defines.h
@@ -0,0 +1,1418 @@
+#define DTM_IDCODE 0x01
+/*
+* Identifies the release version of this part.
+ */
+#define DTM_IDCODE_VERSION_OFFSET 28
+#define DTM_IDCODE_VERSION_LENGTH 4
+#define DTM_IDCODE_VERSION (0xf << DTM_IDCODE_VERSION_OFFSET)
+/*
+* Identifies the designer's part number of this part.
+ */
+#define DTM_IDCODE_PARTNUMBER_OFFSET 12
+#define DTM_IDCODE_PARTNUMBER_LENGTH 16
+#define DTM_IDCODE_PARTNUMBER (0xffff << DTM_IDCODE_PARTNUMBER_OFFSET)
+/*
+* Identifies the designer/manufacturer of this part. Bits 6:0 must be
+* bits 6:0 of the designer/manufacturer's Identification Code as
+* assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16
+* count of the number of continuation characters (0x7f) in that same
+* Identification Code.
+ */
+#define DTM_IDCODE_MANUFID_OFFSET 1
+#define DTM_IDCODE_MANUFID_LENGTH 11
+#define DTM_IDCODE_MANUFID (0x7ff << DTM_IDCODE_MANUFID_OFFSET)
+#define DTM_IDCODE_1_OFFSET 0
+#define DTM_IDCODE_1_LENGTH 1
+#define DTM_IDCODE_1 (0x1 << DTM_IDCODE_1_OFFSET)
+#define DTM_DTMCS 0x10
+/*
+* Writing 1 to this bit does a hard reset of the DTM,
+* causing the DTM to forget about any outstanding DMI transactions.
+* In general this should only be used when the Debugger has
+* reason to expect that the outstanding DMI transaction will never
+* complete (e.g. a reset condition caused an inflight DMI transaction to
+* be cancelled).
+ */ +#define DTM_DTMCS_DMIHARDRESET_OFFSET 17 +#define DTM_DTMCS_DMIHARDRESET_LENGTH 1 +#define DTM_DTMCS_DMIHARDRESET (0x1 << DTM_DTMCS_DMIHARDRESET_OFFSET) +/* +* Writing 1 to this bit clears the sticky error state +* and allows the DTM to retry or complete the previous +* transaction. + */ +#define DTM_DTMCS_DMIRESET_OFFSET 16 +#define DTM_DTMCS_DMIRESET_LENGTH 1 +#define DTM_DTMCS_DMIRESET (0x1 << DTM_DTMCS_DMIRESET_OFFSET) +/* +* This is a hint to the debugger of the minimum number of +* cycles a debugger should spend in +* Run-Test/Idle after every DMI scan to avoid a `busy' +* return code (\Fdmistat of 3). A debugger must still +* check \Fdmistat when necessary. +* +* 0: It is not necessary to enter Run-Test/Idle at all. +* +* 1: Enter Run-Test/Idle and leave it immediately. +* +* 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving. +* +* And so on. + */ +#define DTM_DTMCS_IDLE_OFFSET 12 +#define DTM_DTMCS_IDLE_LENGTH 3 +#define DTM_DTMCS_IDLE (0x7 << DTM_DTMCS_IDLE_OFFSET) +/* +* 0: No error. +* +* 1: Reserved. Interpret the same as 2. +* +* 2: An operation failed (resulted in \Fop of 2). +* +* 3: An operation was attempted while a DMI access was still in +* progress (resulted in \Fop of 3). + */ +#define DTM_DTMCS_DMISTAT_OFFSET 10 +#define DTM_DTMCS_DMISTAT_LENGTH 2 +#define DTM_DTMCS_DMISTAT (0x3 << DTM_DTMCS_DMISTAT_OFFSET) +/* +* The size of \Faddress in \Rdmi. + */ +#define DTM_DTMCS_ABITS_OFFSET 4 +#define DTM_DTMCS_ABITS_LENGTH 6 +#define DTM_DTMCS_ABITS (0x3f << DTM_DTMCS_ABITS_OFFSET) +/* +* 0: Version described in spec version 0.11. +* +* 1: Version described in spec version 0.13 (and later?), which +* reduces the DMI data width to 32 bits. +* +* Other values are reserved for future use. + */ +#define DTM_DTMCS_VERSION_OFFSET 0 +#define DTM_DTMCS_VERSION_LENGTH 4 +#define DTM_DTMCS_VERSION (0xf << DTM_DTMCS_VERSION_OFFSET) +#define DTM_DMI 0x11 +/* +* Address used for DMI access. In Update-DR this value is used +* to access the DM over the DMI. 
+ */
+#define DTM_DMI_ADDRESS_OFFSET 34
+#define DTM_DMI_ADDRESS_LENGTH abits
+#define DTM_DMI_ADDRESS (((1L << abits) - 1) << DTM_DMI_ADDRESS_OFFSET)
diff --git a/vendor/riscv-isa-sim/fesvr/device.cc b/vendor/riscv-isa-sim/fesvr/device.cc
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/device.cc
@@ -0,0 +1,154 @@
+#include "device.h"
+#include "term.h"
+#include <cassert>
+#include <cstring>
+#include <fcntl.h>
+#include <stdexcept>
+#include <string>
+#include <sys/stat.h>
+#include <unistd.h>
+using namespace std::placeholders;
+
+device_t::device_t()
+  : command_handlers(command_t::MAX_COMMANDS),
+    command_names(command_t::MAX_COMMANDS)
+{
+  for (size_t cmd = 0; cmd < command_t::MAX_COMMANDS; cmd++)
+    register_command(cmd, std::bind(&device_t::handle_null_command, this, _1), "");
+  register_command(command_t::MAX_COMMANDS-1, std::bind(&device_t::handle_identify, this, _1), "identity");
+}
+
+void device_t::register_command(size_t cmd, command_func_t handler, const char* name)
+{
+  assert(cmd < command_t::MAX_COMMANDS);
+  assert(strlen(name) < IDENTITY_SIZE);
+  command_handlers[cmd] = handler;
+  command_names[cmd] = name;
+}
+
+void device_t::handle_command(command_t cmd)
+{
+  command_handlers[cmd.cmd()](cmd);
+}
+
+void device_t::handle_null_command(command_t cmd)
+{
+}
+
+void device_t::handle_identify(command_t cmd)
+{
+  size_t what = cmd.payload() % command_t::MAX_COMMANDS;
+  uint64_t addr = cmd.payload() / command_t::MAX_COMMANDS;
+
+  char id[IDENTITY_SIZE] = {0};
+  if (what == command_t::MAX_COMMANDS-1)
+  {
+    assert(strlen(identity()) < IDENTITY_SIZE);
+    strcpy(id, identity());
+  }
+  else
+    strcpy(id, command_names[what].c_str());
+
+  cmd.memif().write(addr, IDENTITY_SIZE, id);
+  cmd.respond(1);
+}
+
+bcd_t::bcd_t()
+{
+  register_command(0, std::bind(&bcd_t::handle_read, this, _1), "read");
+  register_command(1, std::bind(&bcd_t::handle_write, this, _1), "write");
+}
+
+void bcd_t::handle_read(command_t cmd)
+{
+  pending_reads.push(cmd);
+}
+
+void bcd_t::handle_write(command_t cmd)
+{
+  canonical_terminal_t::write(cmd.payload());
+}
+
+void bcd_t::tick()
+{
+  int ch;
+  if (!pending_reads.empty() && (ch = canonical_terminal_t::read()) != -1)
+  {
+    pending_reads.front().respond(0x100 | ch);
+    pending_reads.pop();
+  }
+}
+
+disk_t::disk_t(const char* fn)
+{
+  fd = ::open(fn, O_RDWR);
+  if (fd < 0)
+    throw std::runtime_error("could not open " + std::string(fn));
+
+  register_command(0, std::bind(&disk_t::handle_read, this, _1), "read");
+  register_command(1, std::bind(&disk_t::handle_write, this, _1), "write");
+
+  struct stat st;
+  if (fstat(fd, &st) < 0)
+    throw std::runtime_error("could not stat " + std::string(fn));
+
+  size = st.st_size;
+  id = "disk size=" + std::to_string(size);
+}
+
+disk_t::~disk_t()
+{
+  close(fd);
+}
+
+void disk_t::handle_read(command_t cmd)
+{
+  request_t req;
+  cmd.memif().read(cmd.payload(), sizeof(req), &req);
+
+  std::vector<char> buf(req.size);
+  if ((size_t)::pread(fd, buf.data(), buf.size(), req.offset) != req.size)
+    throw std::runtime_error("could not read " + id + " @ " + std::to_string(req.offset));
+
+  cmd.memif().write(req.addr, buf.size(), buf.data());
+  cmd.respond(req.tag);
+}
+
+void disk_t::handle_write(command_t cmd)
+{
+  request_t req;
+  cmd.memif().read(cmd.payload(), sizeof(req), &req);
+
+  std::vector<char> buf(req.size);
+  cmd.memif().read(req.addr, buf.size(), buf.data());
+
+  if ((size_t)::pwrite(fd, buf.data(), buf.size(), req.offset) != req.size)
+    throw std::runtime_error("could not write " + id + " @ " + std::to_string(req.offset));
+
+  cmd.respond(req.tag);
+}
+
+device_list_t::device_list_t()
+  : devices(command_t::MAX_COMMANDS, &null_device), num_devices(0)
+{
+}
+
+void device_list_t::register_device(device_t* dev)
+{
+  num_devices++;
+  assert(num_devices < command_t::MAX_DEVICES);
+  devices[num_devices-1] = dev;
+}
+
+void device_list_t::handle_command(command_t cmd)
+{
+  devices[cmd.device()]->handle_command(cmd);
+}
+
+void device_list_t::tick()
+{
+  for (size_t i = 0; i < num_devices; i++)
+    devices[i]->tick();
+}
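The register/dispatch machinery above is all a new host-side device needs: a constructor that registers handlers and an identity() string. A minimal sketch (the device name and semantics are invented for illustration):

    #include "device.h"
    #include <functional>

    // Sketch: a device that echoes its 48-bit payload back to the target.
    class echo_t : public device_t
    {
     public:
      echo_t()
      {
        register_command(0, std::bind(&echo_t::handle_echo, this,
                                      std::placeholders::_1), "echo");
      }
      const char* identity() { return "echo"; }

     private:
      void handle_echo(command_t cmd)
      {
        // payload() is the low 48 bits of tohost; respond() completes
        // the tohost/fromhost handshake.
        cmd.respond(cmd.payload());
      }
    };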
diff --git a/vendor/riscv-isa-sim/fesvr/device.h b/vendor/riscv-isa-sim/fesvr/device.h
new file mode 100644
index 00000000..1387b745
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/device.h
@@ -0,0 +1,118 @@
+#ifndef _DEVICE_H
+#define _DEVICE_H
+
+#include <cstdint>
+#include <functional>
+#include <queue>
+#include <string>
+#include <vector>
+
+class memif_t;
+
+class command_t
+{
+ public:
+  typedef std::function<void(uint64_t)> callback_t;
+  command_t(memif_t& memif, uint64_t tohost, callback_t cb)
+    : _memif(memif), tohost(tohost), cb(cb) {}
+
+  memif_t& memif() { return _memif; }
+  uint8_t device() { return tohost >> 56; }
+  uint8_t cmd() { return tohost >> 48; }
+  uint64_t payload() { return tohost << 16 >> 16; }
+  void respond(uint64_t resp) { cb((tohost >> 48 << 48) | (resp << 16 >> 16)); }
+
+  static const size_t MAX_COMMANDS = 256;
+  static const size_t MAX_DEVICES = 256;
+
+ private:
+  memif_t& _memif;
+  uint64_t tohost;
+  callback_t cb;
+};
+
+class device_t
+{
+ public:
+  device_t();
+  virtual ~device_t() {}
+  virtual const char* identity() = 0;
+  virtual void tick() {}
+
+  void handle_command(command_t cmd);
+
+ protected:
+  typedef std::function<void(command_t)> command_func_t;
+  void register_command(size_t, command_func_t, const char*);
+
+ private:
+  device_t& operator = (const device_t&); // disallow
+  device_t(const device_t&); // disallow
+
+  static const size_t IDENTITY_SIZE = 64;
+  void handle_null_command(command_t cmd);
+  void handle_identify(command_t cmd);
+
+  std::vector<command_func_t> command_handlers;
+  std::vector<std::string> command_names;
+};
+
+class bcd_t : public device_t
+{
+ public:
+  bcd_t();
+  const char* identity() { return "bcd"; }
+  void tick();
+
+ private:
+  void handle_read(command_t cmd);
+  void handle_write(command_t cmd);
+
+  std::queue<command_t> pending_reads;
+};
+
+class disk_t : public device_t
+{
+ public:
+  disk_t(const char* fn);
+  ~disk_t();
+  const char* identity() { return id.c_str(); }
+
+ private:
+  struct request_t
+  {
+    uint64_t addr;
+    uint64_t offset;
+    uint64_t size;
+    uint64_t tag;
+  };
+
+  void handle_read(command_t cmd);
+  void handle_write(command_t cmd);
+
+  std::string id;
+  size_t size;
+  int fd;
+};
+
+class null_device_t : public device_t
+{
+ public:
+  const char* identity() { return ""; }
+};
+
+class device_list_t
+{
+ public:
+  device_list_t();
+  void register_device(device_t* dev);
+  void handle_command(command_t cmd);
+  void tick();
+
+ private:
+  std::vector<device_t*> devices;
+  null_device_t null_device;
+  size_t num_devices;
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/fesvr/dtm.cc b/vendor/riscv-isa-sim/fesvr/dtm.cc
new file mode 100644
index 00000000..b5de14c0
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/dtm.cc
@@ -0,0 +1,644 @@
+#include "dtm.h"
+#include "debug_defines.h"
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <stdexcept>
+#include <string>
+
+#define RV_X(x, s, n) \
+  (((x) >> (s)) & ((1 << (n)) - 1))
+#define ENCODE_ITYPE_IMM(x) \
+  (RV_X(x, 0, 12) << 20)
+#define ENCODE_STYPE_IMM(x) \
+  ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
+#define ENCODE_SBTYPE_IMM(x) \
+  ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
+#define ENCODE_UTYPE_IMM(x) \
+  (RV_X(x, 12, 20) << 12)
+#define ENCODE_UJTYPE_IMM(x) \
+  ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
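These helpers compose full RISC-V instruction words from bit fields. For instance, the LOAD macro defined next packs opcode/funct3, rd, rs1 and an I-type immediate; checking one assembled constant by hand (illustrative only):

    // lw s1, 8(s0): base 0x00002003, rd=9 (s1) at bit 7,
    // rs1=8 (s0) at bit 15, imm=8 at bits [31:20].
    static_assert((0x00002003u | (9u << 7) | (8u << 15)
                   | (uint32_t)ENCODE_ITYPE_IMM(8)) == 0x00842483u,
                  "I-type field packing");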
+#define LOAD(xlen, dst, base, imm) \
+  (((xlen) == 64 ? 0x00003003 : 0x00002003) \
+   | ((dst) << 7) | ((base) << 15) | (uint32_t)ENCODE_ITYPE_IMM(imm))
+#define STORE(xlen, src, base, imm) \
+  (((xlen) == 64 ? 0x00003023 : 0x00002023) \
+   | ((src) << 20) | ((base) << 15) | (uint32_t)ENCODE_STYPE_IMM(imm))
+#define JUMP(there, here) (0x6f | (uint32_t)ENCODE_UJTYPE_IMM((there) - (here)))
+#define BNE(r1, r2, there, here) (0x1063 | ((r1) << 15) | ((r2) << 20) | (uint32_t)ENCODE_SBTYPE_IMM((there) - (here)))
+#define ADDI(dst, src, imm) (0x13 | ((dst) << 7) | ((src) << 15) | (uint32_t)ENCODE_ITYPE_IMM(imm))
+#define SRL(dst, src, sh) (0x5033 | ((dst) << 7) | ((src) << 15) | ((sh) << 20))
+#define FENCE_I 0x100f
+#define EBREAK 0x00100073
+#define X0 0
+#define S0 8
+#define S1 9
+
+#define AC_AR_REGNO(x) ((0x1000 | x) << AC_ACCESS_REGISTER_REGNO_OFFSET)
+#define AC_AR_SIZE(x) (((x == 128)? 4 : (x == 64 ? 3 : 2)) << AC_ACCESS_REGISTER_SIZE_OFFSET)
+
+#define WRITE 1
+#define SET 2
+#define CLEAR 3
+#define CSRRx(type, dst, csr, src) (0x73 | ((type) << 12) | ((dst) << 7) | ((src) << 15) | (uint32_t)((csr) << 20))
+
+#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
+
+#define RUN_AC_OR_DIE(a, b, c, d, e) { \
+  uint32_t cmderr = run_abstract_command(a, b, c, d, e); \
+  if (cmderr) { \
+    die(cmderr); \
+  } \
+}
+
+uint32_t dtm_t::do_command(dtm_t::req r)
+{
+  req_buf = r;
+  target->switch_to();
+  assert(resp_buf.resp == 0);
+  return resp_buf.data;
+}
+
+uint32_t dtm_t::read(uint32_t addr)
+{
+  return do_command((req){addr, 1, 0});
+}
+
+uint32_t dtm_t::write(uint32_t addr, uint32_t data)
+{
+  return do_command((req){addr, 2, data});
+}
+
+void dtm_t::nop()
+{
+  do_command((req){0, 0, 0});
+}
+
+void dtm_t::select_hart(int hartsel) {
+  int dmcontrol = read(DMI_DMCONTROL);
+  write(DMI_DMCONTROL, set_field(dmcontrol, DMI_DMCONTROL_HARTSEL, hartsel));
+  current_hart = hartsel;
+}
+
+int dtm_t::enumerate_harts() {
+  int max_hart = (1 << DMI_DMCONTROL_HARTSEL_LENGTH) - 1;
+  write(DMI_DMCONTROL, set_field(read(DMI_DMCONTROL), DMI_DMCONTROL_HARTSEL, max_hart));
+  read(DMI_DMSTATUS);
+  max_hart = get_field(read(DMI_DMCONTROL), DMI_DMCONTROL_HARTSEL);
+
+  int hartsel;
+  for (hartsel = 0; hartsel <= max_hart; hartsel++) {
+    select_hart(hartsel);
+    int dmstatus = read(DMI_DMSTATUS);
+    if (get_field(dmstatus, DMI_DMSTATUS_ANYNONEXISTENT))
+      break;
+  }
+  return hartsel;
+}
+
+void dtm_t::halt(int hartsel)
+{
+  if (running) {
+    write(DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
+    // Read dmstatus to avoid back-to-back writes to dmcontrol.
+    read(DMI_DMSTATUS);
+  }
+
+  int dmcontrol = DMI_DMCONTROL_HALTREQ | DMI_DMCONTROL_DMACTIVE;
+  dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HARTSEL, hartsel);
+  write(DMI_DMCONTROL, dmcontrol);
+  int dmstatus;
+  do {
+    dmstatus = read(DMI_DMSTATUS);
+  } while(get_field(dmstatus, DMI_DMSTATUS_ALLHALTED) == 0);
+  dmcontrol &= ~DMI_DMCONTROL_HALTREQ;
+  write(DMI_DMCONTROL, dmcontrol);
+  // Read dmstatus to avoid back-to-back writes to dmcontrol.
+ read(DMI_DMSTATUS); + current_hart = hartsel; +} + +void dtm_t::resume(int hartsel) +{ + int dmcontrol = DMI_DMCONTROL_RESUMEREQ | DMI_DMCONTROL_DMACTIVE; + dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HARTSEL, hartsel); + write(DMI_DMCONTROL, dmcontrol); + int dmstatus; + do { + dmstatus = read(DMI_DMSTATUS); + } while (get_field(dmstatus, DMI_DMSTATUS_ALLRESUMEACK) == 0); + dmcontrol &= ~DMI_DMCONTROL_RESUMEREQ; + write(DMI_DMCONTROL, dmcontrol); + // Read dmstatus to avoid back-to-back writes to dmcontrol. + read(DMI_DMSTATUS); + current_hart = hartsel; + + if (running) { + write(DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE); + // Read dmstatus to avoid back-to-back writes to dmcontrol. + read(DMI_DMSTATUS); + } +} + +uint64_t dtm_t::save_reg(unsigned regno) +{ + uint32_t data[xlen/(8*4)]; + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | AC_AR_SIZE(xlen) | AC_AR_REGNO(regno); + RUN_AC_OR_DIE(command, 0, 0, data, xlen / (8*4)); + + uint64_t result = data[0]; + if (xlen > 32) { + result |= ((uint64_t)data[1]) << 32; + } + return result; +} + +void dtm_t::restore_reg(unsigned regno, uint64_t val) +{ + uint32_t data[xlen/(8*4)]; + data[0] = (uint32_t) val; + if (xlen > 32) { + data[1] = (uint32_t) (val >> 32); + } + + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(regno); + + RUN_AC_OR_DIE(command, 0, 0, data, xlen / (8*4)); + +} + +uint32_t dtm_t::run_abstract_command(uint32_t command, + const uint32_t program[], size_t program_n, + uint32_t data[], size_t data_n) +{ + assert(program_n <= ram_words); + assert(data_n <= data_words); + + for (size_t i = 0; i < program_n; i++) { + write(DMI_PROGBUF0 + i, program[i]); + } + + if (get_field(command, AC_ACCESS_REGISTER_WRITE) && + get_field(command, AC_ACCESS_REGISTER_TRANSFER)) { + for (size_t i = 0; i < data_n; i++) { + write(DMI_DATA0 + i, data[i]); + } + } + + write(DMI_COMMAND, command); + + // Wait for not busy and then check for error. + uint32_t abstractcs; + do { + abstractcs = read(DMI_ABSTRACTCS); + } while (abstractcs & DMI_ABSTRACTCS_BUSY); + + if ((get_field(command, AC_ACCESS_REGISTER_WRITE) == 0) && + get_field(command, AC_ACCESS_REGISTER_TRANSFER)) { + for (size_t i = 0; i < data_n; i++){ + data[i] = read(DMI_DATA0 + i); + } + } + + return get_field(abstractcs, DMI_ABSTRACTCS_CMDERR); + +} + +size_t dtm_t::chunk_align() +{ + return xlen / 8; +} + +void dtm_t::read_chunk(uint64_t taddr, size_t len, void* dst) +{ + uint32_t prog[ram_words]; + uint32_t data[data_words]; + + uint8_t * curr = (uint8_t*) dst; + + halt(current_hart); + + uint64_t s0 = save_reg(S0); + uint64_t s1 = save_reg(S1); + + prog[0] = LOAD(xlen, S1, S0, 0); + prog[1] = ADDI(S0, S0, xlen/8); + prog[2] = EBREAK; + + data[0] = (uint32_t) taddr; + if (xlen > 32) { + data[1] = (uint32_t) (taddr >> 32); + } + + // Write s0 with the address, then execute program buffer. + // This will get S1 with the data and increment s0. + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_ACCESS_REGISTER_POSTEXEC | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S0); + + RUN_AC_OR_DIE(command, prog, 3, data, xlen/(4*8)); + + // TODO: could use autoexec here. 
+ for (size_t i = 0; i < (len * 8 / xlen); i++){ + command = AC_ACCESS_REGISTER_TRANSFER | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S1); + if ((i + 1) < (len * 8 / xlen)) { + command |= AC_ACCESS_REGISTER_POSTEXEC; + } + + RUN_AC_OR_DIE(command, 0, 0, data, xlen/(4*8)); + + memcpy(curr, data, xlen/8); + curr += xlen/8; + } + + restore_reg(S0, s0); + restore_reg(S1, s1); + + resume(current_hart); + +} + +void dtm_t::write_chunk(uint64_t taddr, size_t len, const void* src) +{ + uint32_t prog[ram_words]; + uint32_t data[data_words]; + + const uint8_t * curr = (const uint8_t*) src; + + halt(current_hart); + + uint64_t s0 = save_reg(S0); + uint64_t s1 = save_reg(S1); + + prog[0] = STORE(xlen, S1, S0, 0); + prog[1] = ADDI(S0, S0, xlen/8); + prog[2] = EBREAK; + + data[0] = (uint32_t) taddr; + if (xlen > 32) { + data[1] = (uint32_t) (taddr >> 32); + } + + // Write the program (not used yet). + // Write s0 with the address. + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S0); + + RUN_AC_OR_DIE(command, prog, 3, data, xlen/(4*8)); + + // Use Autoexec for more than one word of transfer. + // Write S1 with data, then execution stores S1 to + // 0(S0) and increments S0. + // Each time we write XLEN bits. + memcpy(data, curr, xlen/8); + curr += xlen/8; + + command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_POSTEXEC | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S1); + + RUN_AC_OR_DIE(command, 0, 0, data, xlen/(4*8)); + + uint32_t abstractcs; + for (size_t i = 1; i < (len * 8 / xlen); i++){ + if (i == 1) { + write(DMI_ABSTRACTAUTO, 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET); + } + memcpy(data, curr, xlen/8); + curr += xlen/8; + if (xlen == 64) { + write(DMI_DATA0 + 1, data[1]); + } + write(DMI_DATA0, data[0]); //Triggers a command w/ autoexec. 
+ + do { + abstractcs = read(DMI_ABSTRACTCS); + } while (abstractcs & DMI_ABSTRACTCS_BUSY); + if ( get_field(abstractcs, DMI_ABSTRACTCS_CMDERR)) { + die(get_field(abstractcs, DMI_ABSTRACTCS_CMDERR)); + } + } + if ((len * 8 / xlen) > 1) { + write(DMI_ABSTRACTAUTO, 0); + } + + restore_reg(S0, s0); + restore_reg(S1, s1); + resume(current_hart); +} + +void dtm_t::die(uint32_t cmderr) +{ + const char * codes[] = { + "OK", + "BUSY", + "NOT_SUPPORTED", + "EXCEPTION", + "HALT/RESUME" + }; + const char * msg; + if (cmderr < (sizeof(codes) / sizeof(*codes))){ + msg = codes[cmderr]; + } else { + msg = "OTHER"; + } + //throw std::runtime_error("Debug Abstract Command Error #" + std::to_string(cmderr) + "(" + msg + ")"); + printf("ERROR: %s:%d, Debug Abstract Command Error #%d (%s)", __FILE__, __LINE__, cmderr, msg); + printf("ERROR: %s:%d, Should die, but allowing simulation to continue and fail.", __FILE__, __LINE__); + write(DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR); + +} + +void dtm_t::clear_chunk(uint64_t taddr, size_t len) +{ + uint32_t prog[ram_words]; + uint32_t data[data_words]; + + halt(current_hart); + uint64_t s0 = save_reg(S0); + uint64_t s1 = save_reg(S1); + + uint32_t command; + + // S0 = Addr + data[0] = (uint32_t) taddr; + data[1] = (uint32_t) (taddr >> 32); + command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S0); + RUN_AC_OR_DIE(command, 0, 0, data, xlen/(4*8)); + + // S1 = Addr + len, loop until S0 = S1 + prog[0] = STORE(xlen, X0, S0, 0); + prog[1] = ADDI(S0, S0, xlen/8); + prog[2] = BNE(S0, S1, 0*4, 2*4); + prog[3] = EBREAK; + + data[0] = (uint32_t) (taddr + len); + data[1] = (uint32_t) ((taddr + len) >> 32); + command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S1) | + AC_ACCESS_REGISTER_POSTEXEC; + RUN_AC_OR_DIE(command, prog, 4, data, xlen/(4*8)); + + restore_reg(S0, s0); + restore_reg(S1, s1); + + resume(current_hart); +} + +uint64_t dtm_t::write_csr(unsigned which, uint64_t data) +{ + return modify_csr(which, data, WRITE); +} + +uint64_t dtm_t::set_csr(unsigned which, uint64_t data) +{ + return modify_csr(which, data, SET); +} + +uint64_t dtm_t::clear_csr(unsigned which, uint64_t data) +{ + return modify_csr(which, data, CLEAR); +} + +uint64_t dtm_t::read_csr(unsigned which) +{ + return set_csr(which, 0); +} + +uint64_t dtm_t::modify_csr(unsigned which, uint64_t data, uint32_t type) +{ + halt(current_hart); + + // This code just uses DSCRATCH to save S0 + // and data_base to do the transfer so we don't + // need to run more commands to save and restore + // S0. + uint32_t prog[] = { + CSRRx(WRITE, S0, CSR_DSCRATCH0, S0), + LOAD(xlen, S0, X0, data_base), + CSRRx(type, S0, which, S0), + STORE(xlen, S0, X0, data_base), + CSRRx(WRITE, S0, CSR_DSCRATCH0, S0), + EBREAK + }; + + //TODO: Use transfer = 0. For now both HW and OpenOCD + // ignore transfer bit, so use "store to X0" NOOP. + // We sort of need this anyway because run_abstract_command + // needs the DATA to be written so may as well use the WRITE flag. 
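+  // Note that a CSR read is just a modify with no net side effect:
+  // read_csr(which) calls set_csr(which, 0), and CSRRS with an
+  // all-zero operand writes the old value back unchanged while still
+  // returning it through the debug data words.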
+
+  uint32_t adata[] = {(uint32_t) data,
+                      (uint32_t) (data >> 32)};
+
+  uint32_t command = AC_ACCESS_REGISTER_POSTEXEC |
+    AC_ACCESS_REGISTER_TRANSFER |
+    AC_ACCESS_REGISTER_WRITE |
+    AC_AR_SIZE(xlen) |
+    AC_AR_REGNO(X0);
+
+  RUN_AC_OR_DIE(command, prog, sizeof(prog) / sizeof(*prog), adata, xlen/(4*8));
+
+  uint64_t res = read(DMI_DATA0); //adata[0]
+  if (xlen == 64)
+    res |= ((uint64_t) read(DMI_DATA0 + 1)) << 32; //adata[1]
+
+  resume(current_hart);
+  return res;
+}
+
+size_t dtm_t::chunk_max_size()
+{
+  // Arbitrary choice. A 4K page size seems reasonable.
+  return 4096;
+}
+
+uint32_t dtm_t::get_xlen()
+{
+  // Attempt to read S0 to find out what size it is.
+  // You could also attempt to run code, but you need to save registers
+  // to do that anyway. If what you really want to do is figure out
+  // the size of S0 so you can save it later, then do that.
+  uint32_t command = AC_ACCESS_REGISTER_TRANSFER | AC_AR_REGNO(S0);
+  uint32_t cmderr;
+
+  const uint32_t prog[] = {};
+  uint32_t data[] = {};
+
+  cmderr = run_abstract_command(command | AC_AR_SIZE(128), prog, 0, data, 0);
+  if (cmderr == 0) {
+    throw std::runtime_error("FESVR DTM does not support 128-bit");
+    abort();
+    return 128;
+  }
+  write(DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR);
+
+  cmderr = run_abstract_command(command | AC_AR_SIZE(64), prog, 0, data, 0);
+  if (cmderr == 0) {
+    return 64;
+  }
+  write(DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR);
+
+  cmderr = run_abstract_command(command | AC_AR_SIZE(32), prog, 0, data, 0);
+  if (cmderr == 0) {
+    return 32;
+  }
+
+  throw std::runtime_error("FESVR DTM can't determine XLEN. Aborting");
+}
+
+void dtm_t::fence_i()
+{
+  halt(current_hart);
+
+  const uint32_t prog[] = {
+    FENCE_I,
+    EBREAK
+  };
+
+  //TODO: Use transfer = 0.
+  uint32_t command = AC_ACCESS_REGISTER_POSTEXEC |
+    AC_ACCESS_REGISTER_TRANSFER |
+    AC_ACCESS_REGISTER_WRITE |
+    AC_AR_SIZE(xlen) |
+    AC_AR_REGNO(X0);
+
+  RUN_AC_OR_DIE(command, prog, sizeof(prog)/sizeof(*prog), 0, 0);
+
+  resume(current_hart);
+}
+
+void host_thread_main(void* arg)
+{
+  ((dtm_t*)arg)->producer_thread();
+}
+
+void dtm_t::reset()
+{
+  for (int hartsel = 0; hartsel < num_harts; hartsel++) {
+    select_hart(hartsel);
+    // This command also does a halt and resume.
+    fence_i();
+    // After this command, the hart will run from _start.
+    write_csr(0x7b1, get_entry_point());
+  }
+  // In theory any hart can handle the memory accesses;
+  // this enforces that hart 0 handles them.
+  select_hart(0);
+  read(DMI_DMSTATUS);
+}
+
+void dtm_t::idle()
+{
+  for (int idle_cycles = 0; idle_cycles < max_idle_cycles; idle_cycles++)
+    nop();
+}
+
+void dtm_t::producer_thread()
+{
+  // Learn about the Debug Module and assert things we
+  // depend on in this code.
+
+  // Enable the debugger.
+  write(DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
+  // Poll until the debugger agrees it's enabled.
+  while ((read(DMI_DMCONTROL) & DMI_DMCONTROL_DMACTIVE) == 0) ;
+
+  // These are checked every time we run an abstract command.
+  uint32_t abstractcs = read(DMI_ABSTRACTCS);
+  ram_words = get_field(abstractcs, DMI_ABSTRACTCS_PROGSIZE);
+  data_words = get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT);
+
+  // These are only needed by the 'modify_csr' function, which could be
+  // rewritten to avoid them at some performance overhead.
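+  // hartinfo describes where the debug data words live. modify_csr
+  // relies on nscratch >= 1 (so dscratch0 can hold S0) and on
+  // dataaccess = 1 (so the data words are memory-mapped at dataaddr
+  // and reachable with plain loads and stores from the program buffer).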
+  uint32_t hartinfo = read(DMI_HARTINFO);
+  assert(get_field(hartinfo, DMI_HARTINFO_NSCRATCH) > 0);
+  assert(get_field(hartinfo, DMI_HARTINFO_DATAACCESS));
+
+  data_base = get_field(hartinfo, DMI_HARTINFO_DATAADDR);
+
+  num_harts = enumerate_harts();
+  halt(0);
+  // Note: We don't support systems with heterogeneous XLEN.
+  // It's possible to do this at the cost of extra cycles.
+  xlen = get_xlen();
+  resume(0);
+
+  running = true;
+
+  htif_t::run();
+
+  while (true)
+    nop();
+}
+
+void dtm_t::start_host_thread()
+{
+  req_wait = false;
+  resp_wait = false;
+
+  target = context_t::current();
+  host.init(host_thread_main, this);
+  host.switch_to();
+}
+
+dtm_t::dtm_t(int argc, char** argv)
+  : htif_t(argc, argv), running(false)
+{
+  start_host_thread();
+}
+
+dtm_t::~dtm_t()
+{
+}
+
+void dtm_t::tick(
+  bool req_ready,
+  bool resp_valid,
+  resp resp_bits)
+{
+  if (!resp_wait) {
+    if (!req_wait) {
+      req_wait = true;
+    } else if (req_ready) {
+      req_wait = false;
+      resp_wait = true;
+    }
+  }
+
+  if (resp_valid) {
+    assert(resp_wait);
+    resp_wait = false;
+
+    resp_buf = resp_bits;
+    // Update the target with the current context.
+    target = context_t::current();
+    host.switch_to();
+  }
+}
+
+void dtm_t::return_resp(resp resp_bits)
+{
+  resp_buf = resp_bits;
+  target = context_t::current();
+  host.switch_to();
+}
diff --git a/vendor/riscv-isa-sim/fesvr/dtm.h b/vendor/riscv-isa-sim/fesvr/dtm.h
new file mode 100644
index 00000000..fbf161ef
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/dtm.h
@@ -0,0 +1,115 @@
+#ifndef _ROCKET_DTM_H
+#define _ROCKET_DTM_H
+
+#include "htif.h"
+#include "context.h"
+#include <stdint.h>
+#include <queue>
+#include <semaphore.h>
+#include <vector>
+#include <stdlib.h>
+#include <stdio.h>
+
+// abstract debug transport module
+class dtm_t : public htif_t
+{
+ public:
+  dtm_t(int argc, char** argv);
+  ~dtm_t();
+
+  struct req {
+    uint32_t addr;
+    uint32_t op;
+    uint32_t data;
+  };
+
+  struct resp {
+    uint32_t resp;
+    uint32_t data;
+  };
+
+  void tick(
+    bool req_ready,
+    bool resp_valid,
+    resp resp_bits
+  );
+  // Akin to tick, but the target thread returns a response on every invocation.
+  void return_resp(
+    resp resp_bits
+  );
+
+  bool req_valid() { return req_wait; }
+  req req_bits() { return req_buf; }
+  bool resp_ready() { return true; }
+
+  uint32_t read(uint32_t addr);
+  uint32_t write(uint32_t addr, uint32_t data);
+  void nop();
+
+  uint64_t read_csr(unsigned which);
+  uint64_t write_csr(unsigned which, uint64_t data);
+  uint64_t clear_csr(unsigned which, uint64_t data);
+  uint64_t set_csr(unsigned which, uint64_t data);
+  void fence_i();
+
+  void producer_thread();
+
+ protected:
+  virtual void read_chunk(addr_t taddr, size_t len, void* dst) override;
+  virtual void write_chunk(addr_t taddr, size_t len, const void* src) override;
+  virtual void clear_chunk(addr_t taddr, size_t len) override;
+  virtual size_t chunk_align() override;
+  virtual size_t chunk_max_size() override;
+  virtual void reset() override;
+  virtual void idle() override;
+
+ private:
+  context_t host;
+  context_t* target;
+  pthread_t producer;
+  sem_t req_produce;
+  sem_t req_consume;
+  sem_t resp_produce;
+  sem_t resp_consume;
+  req req_buf;
+  resp resp_buf;
+  bool running;
+
+  uint32_t run_abstract_command(uint32_t command, const uint32_t program[], size_t program_n,
+                                uint32_t data[], size_t data_n);
+
+  void die(uint32_t cmderr);
+  void halt(int);
+  int enumerate_harts();
+  void select_hart(int);
+  void resume(int);
+  uint64_t save_reg(unsigned regno);
+  void restore_reg(unsigned regno, uint64_t val);
+
+  uint64_t modify_csr(unsigned which, uint64_t data,
uint32_t type); + + bool req_wait; + bool resp_wait; + uint32_t data_base; + + uint32_t xlen; + + static const int max_idle_cycles = 10000; + + size_t ram_words; + size_t data_words; + int num_harts; + int current_hart; + + uint32_t get_xlen(); + uint32_t do_command(dtm_t::req r); + + void parse_args(const std::vector& args); + void register_devices(); + void start_host_thread(); + + friend class memif_t; +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/dummy.cc b/vendor/riscv-isa-sim/fesvr/dummy.cc new file mode 100644 index 00000000..a155d3e5 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/dummy.cc @@ -0,0 +1,4 @@ +// See LICENSE for license details. + +// help out poor, C-centric autoconf +extern "C" void libfesvr_is_present() {} diff --git a/vendor/riscv-isa-sim/fesvr/elf.h b/vendor/riscv-isa-sim/fesvr/elf.h new file mode 100644 index 00000000..7b38bf11 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elf.h @@ -0,0 +1,134 @@ +// See LICENSE for details. + +#ifndef _ELF_H +#define _ELF_H + +#include + +#define ET_EXEC 2 +#define EM_RISCV 243 +#define EM_NONE 0 +#define EV_CURRENT 1 + +#define IS_ELF(hdr) \ + ((hdr).e_ident[0] == 0x7f && (hdr).e_ident[1] == 'E' && \ + (hdr).e_ident[2] == 'L' && (hdr).e_ident[3] == 'F') + +#define ELF_SWAP(hdr, val) (IS_ELFLE(hdr)? from_le((val)) : from_be((val))) + +#define IS_ELF32(hdr) (IS_ELF(hdr) && (hdr).e_ident[4] == 1) +#define IS_ELF64(hdr) (IS_ELF(hdr) && (hdr).e_ident[4] == 2) +#define IS_ELFLE(hdr) (IS_ELF(hdr) && (hdr).e_ident[5] == 1) +#define IS_ELFBE(hdr) (IS_ELF(hdr) && (hdr).e_ident[5] == 2) +#define IS_ELF_EXEC(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_type) == ET_EXEC) +#define IS_ELF_RISCV(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_machine) == EM_RISCV) +#define IS_ELF_EM_NONE(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_machine) == EM_NONE) +#define IS_ELF_VCURRENT(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_version) == EV_CURRENT) + +#define PT_LOAD 1 + +#define SHT_NOBITS 8 + +typedef struct { + uint8_t e_ident[16]; + uint16_t e_type; + uint16_t e_machine; + uint32_t e_version; + uint32_t e_entry; + uint32_t e_phoff; + uint32_t e_shoff; + uint32_t e_flags; + uint16_t e_ehsize; + uint16_t e_phentsize; + uint16_t e_phnum; + uint16_t e_shentsize; + uint16_t e_shnum; + uint16_t e_shstrndx; +} Elf32_Ehdr; + +typedef struct { + uint32_t sh_name; + uint32_t sh_type; + uint32_t sh_flags; + uint32_t sh_addr; + uint32_t sh_offset; + uint32_t sh_size; + uint32_t sh_link; + uint32_t sh_info; + uint32_t sh_addralign; + uint32_t sh_entsize; +} Elf32_Shdr; + +typedef struct +{ + uint32_t p_type; + uint32_t p_offset; + uint32_t p_vaddr; + uint32_t p_paddr; + uint32_t p_filesz; + uint32_t p_memsz; + uint32_t p_flags; + uint32_t p_align; +} Elf32_Phdr; + +typedef struct +{ + uint32_t st_name; + uint32_t st_value; + uint32_t st_size; + uint8_t st_info; + uint8_t st_other; + uint16_t st_shndx; +} Elf32_Sym; + +typedef struct { + uint8_t e_ident[16]; + uint16_t e_type; + uint16_t e_machine; + uint32_t e_version; + uint64_t e_entry; + uint64_t e_phoff; + uint64_t e_shoff; + uint32_t e_flags; + uint16_t e_ehsize; + uint16_t e_phentsize; + uint16_t e_phnum; + uint16_t e_shentsize; + uint16_t e_shnum; + uint16_t e_shstrndx; +} Elf64_Ehdr; + +typedef struct { + uint32_t sh_name; + uint32_t sh_type; + uint64_t sh_flags; + uint64_t sh_addr; + uint64_t sh_offset; + uint64_t sh_size; + uint32_t sh_link; + uint32_t sh_info; + uint64_t sh_addralign; + uint64_t sh_entsize; +} Elf64_Shdr; + +typedef struct { + uint32_t p_type; + uint32_t p_flags; + uint64_t 
p_offset; + uint64_t p_vaddr; + uint64_t p_paddr; + uint64_t p_filesz; + uint64_t p_memsz; + uint64_t p_align; +} Elf64_Phdr; + +typedef struct { + uint32_t st_name; + uint8_t st_info; + uint8_t st_other; + uint16_t st_shndx; + uint64_t st_value; + uint64_t st_size; +} Elf64_Sym; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/elf2hex.cc b/vendor/riscv-isa-sim/fesvr/elf2hex.cc new file mode 100644 index 00000000..327cf2d9 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elf2hex.cc @@ -0,0 +1,47 @@ +// See LICENSE for license details. + +#include +#include "htif_hexwriter.h" +#include "memif.h" +#include "elfloader.h" + +int main(int argc, char** argv) +{ + if(argc < 4 || argc > 5) + { + std::cerr << "Usage: " << argv[0] << " [base]" << std::endl; + return 1; + } + + unsigned width = atoi(argv[1]); + if(width == 0 || (width & (width-1))) + { + std::cerr << "width must be a power of 2" << std::endl; + return 1; + } + + unsigned long long int base = 0; + if(argc==5) { + base = atoll(argv[4]); + if(base & (width-1)) + { + std::cerr << "base must be divisible by width" << std::endl; + return 1; + } + } + + unsigned depth = atoi(argv[2]); + if(depth == 0 || (depth & (depth-1))) + { + std::cerr << "depth must be a power of 2" << std::endl; + return 1; + } + + htif_hexwriter_t htif(base, width, depth); + memif_t memif(&htif); + reg_t entry; + load_elf(argv[3], &memif, &entry); + std::cout << htif; + + return 0; +} diff --git a/vendor/riscv-isa-sim/fesvr/elfloader.cc b/vendor/riscv-isa-sim/fesvr/elfloader.cc new file mode 100644 index 00000000..76cd6da5 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elfloader.cc @@ -0,0 +1,117 @@ +// See LICENSE for license details. + +#include "elf.h" +#include "memif.h" +#include "byteorder.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +std::map load_elf(const char* fn, memif_t* memif, reg_t* entry) +{ + int fd = open(fn, O_RDONLY); + struct stat s; + assert(fd != -1); + if (fstat(fd, &s) < 0) + abort(); + size_t size = s.st_size; + + char* buf = (char*)mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); + assert(buf != MAP_FAILED); + close(fd); + + assert(size >= sizeof(Elf64_Ehdr)); + const Elf64_Ehdr* eh64 = (const Elf64_Ehdr*)buf; + assert(IS_ELF32(*eh64) || IS_ELF64(*eh64)); + assert(IS_ELFLE(*eh64) || IS_ELFBE(*eh64)); + assert(IS_ELF_EXEC(*eh64)); + assert(IS_ELF_RISCV(*eh64) || IS_ELF_EM_NONE(*eh64)); + assert(IS_ELF_VCURRENT(*eh64)); + + std::vector zeros; + std::map symbols; + +#define LOAD_ELF(ehdr_t, phdr_t, shdr_t, sym_t, bswap) \ + do { \ + ehdr_t* eh = (ehdr_t*)buf; \ + phdr_t* ph = (phdr_t*)(buf + bswap(eh->e_phoff)); \ + *entry = bswap(eh->e_entry); \ + assert(size >= bswap(eh->e_phoff) + bswap(eh->e_phnum) * sizeof(*ph)); \ + for (unsigned i = 0; i < bswap(eh->e_phnum); i++) { \ + if (bswap(ph[i].p_type) == PT_LOAD && bswap(ph[i].p_memsz)) { \ + if (bswap(ph[i].p_filesz)) { \ + assert(size >= bswap(ph[i].p_offset) + bswap(ph[i].p_filesz)); \ + memif->write(bswap(ph[i].p_paddr), bswap(ph[i].p_filesz), \ + (uint8_t*)buf + bswap(ph[i].p_offset)); \ + } \ + if (size_t pad = bswap(ph[i].p_memsz) - bswap(ph[i].p_filesz)) { \ + zeros.resize(pad); \ + memif->write(bswap(ph[i].p_paddr) + bswap(ph[i].p_filesz), pad, \ + zeros.data()); \ + } \ + } \ + } \ + shdr_t* sh = (shdr_t*)(buf + bswap(eh->e_shoff)); \ + assert(size >= bswap(eh->e_shoff) + bswap(eh->e_shnum) * sizeof(*sh)); \ + assert(bswap(eh->e_shstrndx) < bswap(eh->e_shnum)); \ + assert(size >= 
bswap(sh[bswap(eh->e_shstrndx)].sh_offset) + \ + bswap(sh[bswap(eh->e_shstrndx)].sh_size)); \ + char* shstrtab = buf + bswap(sh[bswap(eh->e_shstrndx)].sh_offset); \ + unsigned strtabidx = 0, symtabidx = 0; \ + for (unsigned i = 0; i < bswap(eh->e_shnum); i++) { \ + unsigned max_len = \ + bswap(sh[bswap(eh->e_shstrndx)].sh_size) - bswap(sh[i].sh_name); \ + assert(bswap(sh[i].sh_name) < bswap(sh[bswap(eh->e_shstrndx)].sh_size)); \ + assert(strnlen(shstrtab + bswap(sh[i].sh_name), max_len) < max_len); \ + if (bswap(sh[i].sh_type) & SHT_NOBITS) continue; \ + assert(size >= bswap(sh[i].sh_offset) + bswap(sh[i].sh_size)); \ + if (strcmp(shstrtab + bswap(sh[i].sh_name), ".strtab") == 0) \ + strtabidx = i; \ + if (strcmp(shstrtab + bswap(sh[i].sh_name), ".symtab") == 0) \ + symtabidx = i; \ + } \ + if (strtabidx && symtabidx) { \ + char* strtab = buf + bswap(sh[strtabidx].sh_offset); \ + sym_t* sym = (sym_t*)(buf + bswap(sh[symtabidx].sh_offset)); \ + for (unsigned i = 0; i < bswap(sh[symtabidx].sh_size) / sizeof(sym_t); \ + i++) { \ + unsigned max_len = \ + bswap(sh[strtabidx].sh_size) - bswap(sym[i].st_name); \ + assert(bswap(sym[i].st_name) < bswap(sh[strtabidx].sh_size)); \ + assert(strnlen(strtab + bswap(sym[i].st_name), max_len) < max_len); \ + symbols[strtab + bswap(sym[i].st_name)] = bswap(sym[i].st_value); \ + } \ + } \ + } while (0) + + if (IS_ELFLE(*eh64)) { + memif->set_target_endianness(memif_endianness_little); + if (IS_ELF32(*eh64)) + LOAD_ELF(Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Sym, from_le); + else + LOAD_ELF(Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Sym, from_le); + } else { +#ifndef RISCV_ENABLE_DUAL_ENDIAN + throw std::invalid_argument("Specified ELF is big endian. Configure with --enable-dual-endian to enable support"); +#else + memif->set_target_endianness(memif_endianness_big); + if (IS_ELF32(*eh64)) + LOAD_ELF(Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Sym, from_be); + else + LOAD_ELF(Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Sym, from_be); +#endif + } + + munmap(buf, size); + + return symbols; +} diff --git a/vendor/riscv-isa-sim/fesvr/elfloader.h b/vendor/riscv-isa-sim/fesvr/elfloader.h new file mode 100644 index 00000000..696ef478 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elfloader.h @@ -0,0 +1,13 @@ +// See LICENSE for license details. 
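+
+// load_elf() copies an ELF executable into target memory through memif,
+// stores its entry point in *entry, and returns the symbol table as a
+// name -> address map. A typical caller does, roughly (payload name
+// illustrative):
+//
+//   reg_t entry;
+//   auto symbols = load_elf("pk", &memif, &entry);
+//   if (symbols.count("tohost")) { /* hook up HTIF communication */ }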
+
+#ifndef _ELFLOADER_H
+#define _ELFLOADER_H
+
+#include "elf.h"
+#include <map>
+#include <string>
+
+class memif_t;
+std::map<std::string, uint64_t> load_elf(const char* fn, memif_t* memif, reg_t* entry);
+
+#endif
diff --git a/vendor/riscv-isa-sim/fesvr/fesvr.ac b/vendor/riscv-isa-sim/fesvr/fesvr.ac
new file mode 100644
index 00000000..f741baea
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/fesvr.ac
@@ -0,0 +1,11 @@
+AC_CHECK_LIB(pthread, pthread_create, [], [AC_MSG_ERROR([libpthread is required])])
+
+AC_CHECK_MEMBER(struct statx.stx_ino,
+  AC_DEFINE_UNQUOTED(HAVE_STATX, 1, [Define to 1 if struct statx exists.]),
+  ,
+)
+
+AC_CHECK_MEMBER(struct statx.stx_mnt_id,
+  AC_DEFINE_UNQUOTED(HAVE_STATX_MNT_ID, 1, [Define to 1 if struct statx has stx_mnt_id.]),
+  ,
+)
diff --git a/vendor/riscv-isa-sim/fesvr/fesvr.mk.in b/vendor/riscv-isa-sim/fesvr/fesvr.mk.in
new file mode 100644
index 00000000..695de527
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/fesvr.mk.in
@@ -0,0 +1,41 @@
+fesvr_hdrs = \
+  byteorder.h \
+  elf.h \
+  elfloader.h \
+  htif.h \
+  dtm.h \
+  memif.h \
+  syscall.h \
+  context.h \
+  htif_pthread.h \
+  htif_hexwriter.h \
+  option_parser.h \
+  term.h \
+  device.h \
+  rfb.h \
+  tsi.h \
+
+fesvr_install_hdrs = $(fesvr_hdrs)
+
+fesvr_install_config_hdr = yes
+
+fesvr_install_lib = yes
+
+fesvr_srcs = \
+  elfloader.cc \
+  htif.cc \
+  memif.cc \
+  dtm.cc \
+  syscall.cc \
+  device.cc \
+  rfb.cc \
+  context.cc \
+  htif_pthread.cc \
+  htif_hexwriter.cc \
+  dummy.cc \
+  option_parser.cc \
+  term.cc \
+  tsi.cc \
+
+fesvr_install_prog_srcs = \
+  elf2hex.cc \
diff --git a/vendor/riscv-isa-sim/fesvr/fesvr.pc.in b/vendor/riscv-isa-sim/fesvr/fesvr.pc.in
new file mode 100644
index 00000000..f2d12563
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/fesvr.pc.in
@@ -0,0 +1,26 @@
+#=========================================================================
+# Modular C++ Build System Subproject Package Config
+#=========================================================================
+# Please read the documentation in 'mcppbs-uguide.txt' for more details
+# on how the Modular C++ Build System works.
+
+#-------------------------------------------------------------------------
+# Generic variables
+#-------------------------------------------------------------------------
+
+prefix=@prefix@
+include_dir=${prefix}/include/fesvr
+lib_dir=${prefix}/lib
+
+#-------------------------------------------------------------------------
+# Keywords
+#-------------------------------------------------------------------------
+
+Name : fesvr
+Version : @PACKAGE_VERSION@
+Description : Frontend Server C/C++ API
+Requires : @fesvr_pkcdeps@
+Cflags : -I${include_dir} @CPPFLAGS@ @fesvr_extra_cppflags@
+Libs : -L${lib_dir} @LDFLAGS@ @fesvr_extra_ldflags@ \
+       -lfesvr @fesvr_extra_libs@
+
diff --git a/vendor/riscv-isa-sim/fesvr/htif.cc b/vendor/riscv-isa-sim/fesvr/htif.cc
new file mode 100644
index 00000000..ead309c8
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/htif.cc
@@ -0,0 +1,415 @@
+// See LICENSE for license details.
+
+#include "htif.h"
+#include "rfb.h"
+#include "elfloader.h"
+#include "platform.h"
+#include "byteorder.h"
+#include "trap.h"
+#include <algorithm>
+#include <assert.h>
+#include <vector>
+#include <queue>
+#include <iostream>
+#include <fstream>
+#include <iomanip>
+#include <sstream>
+#include <cstring>
+#include <cstdlib>
+#include <signal.h>
+#include <unistd.h>
+#include <getopt.h>
+
+/* Attempt to determine the execution prefix automatically. autoconf
+ * sets PREFIX, and pconfigure sets __PCONFIGURE__PREFIX.
*/ +#if !defined(PREFIX) && defined(__PCONFIGURE__PREFIX) +# define PREFIX __PCONFIGURE__PREFIX +#endif + +#ifndef TARGET_ARCH +# define TARGET_ARCH "riscv64-unknown-elf" +#endif + +#ifndef TARGET_DIR +# define TARGET_DIR "/" TARGET_ARCH "/bin/" +#endif + +static volatile bool signal_exit = false; +static void handle_signal(int sig) +{ + if (sig == SIGABRT || signal_exit) // someone set up us the bomb! + exit(-1); + signal_exit = true; + signal(sig, &handle_signal); +} + +htif_t::htif_t() + : mem(this), entry(DRAM_BASE), sig_addr(0), sig_len(0), + tohost_addr(0), fromhost_addr(0), exitcode(0), stopped(false), + syscall_proxy(this) +{ + signal(SIGINT, &handle_signal); + signal(SIGTERM, &handle_signal); + signal(SIGABRT, &handle_signal); // we still want to call static destructors +} + +htif_t::htif_t(int argc, char** argv) : htif_t() +{ + //Set line size as 16 by default. + line_size = 16; + parse_arguments(argc, argv); + register_devices(); +} + +htif_t::htif_t(const std::vector& args) : htif_t() +{ + int argc = args.size() + 1; + char * argv[argc]; + argv[0] = (char *) "htif"; + for (unsigned int i = 0; i < args.size(); i++) { + argv[i+1] = (char *) args[i].c_str(); + } + //Set line size as 16 by default. + line_size = 16; + parse_arguments(argc, argv); + register_devices(); +} + +htif_t::~htif_t() +{ + for (auto d : dynamic_devices) + delete d; +} + +void htif_t::start() +{ + if (!targs.empty() && targs[0] != "none") + load_program(); + + reset(); +} + +static void bad_address(const std::string& situation, reg_t addr) +{ + std::cerr << "Access exception occurred while " << situation << ":\n"; + std::cerr << "Memory address 0x" << std::hex << addr << " is invalid\n"; + exit(-1); +} + +std::map htif_t::load_payload(const std::string& payload, reg_t* entry) +{ + std::string path; + if (access(payload.c_str(), F_OK) == 0) + path = payload; + else if (payload.find('/') == std::string::npos) + { + std::string test_path = PREFIX TARGET_DIR + payload; + if (access(test_path.c_str(), F_OK) == 0) + path = test_path; + } + + if (path.empty()) + throw std::runtime_error( + "could not open " + payload + + " (did you misspell it? 
If VCS, did you forget +permissive/+permissive-off?)"); + + // temporarily construct a memory interface that skips writing bytes + // that have already been preloaded through a sideband + class preload_aware_memif_t : public memif_t { + public: + preload_aware_memif_t(htif_t* htif) : memif_t(htif), htif(htif) {} + + void write(addr_t taddr, size_t len, const void* src) override + { + if (!htif->is_address_preloaded(taddr, len)) + memif_t::write(taddr, len, src); + } + + private: + htif_t* htif; + } preload_aware_memif(this); + + try { + return load_elf(path.c_str(), &preload_aware_memif, entry); + } catch (mem_trap_t& t) { + bad_address("loading payload " + payload, t.get_tval()); + abort(); + } +} + +void htif_t::load_program() +{ + std::map symbols = load_payload(targs[0], &entry); + + if (symbols.count("tohost") && symbols.count("fromhost")) { + tohost_addr = symbols["tohost"]; + fromhost_addr = symbols["fromhost"]; + } else { + fprintf(stderr, "warning: tohost and fromhost symbols not in ELF; can't communicate with target\n"); + } + + // detect torture tests so we can print the memory signature at the end + if (symbols.count("begin_signature") && symbols.count("end_signature")) + { + sig_addr = symbols["begin_signature"]; + sig_len = symbols["end_signature"] - sig_addr; + } + + for (auto payload : payloads) + { + reg_t dummy_entry; + load_payload(payload, &dummy_entry); + } + + for (auto i : symbols) + { + auto it = addr2symbol.find(i.second); + if ( it == addr2symbol.end()) + addr2symbol[i.second] = i.first; + } + + return; +} + +const char* htif_t::get_symbol(uint64_t addr) +{ + auto it = addr2symbol.find(addr); + + if(it == addr2symbol.end()) + return nullptr; + + return it->second.c_str(); +} + +void htif_t::stop() +{ + if (!sig_file.empty() && sig_len) // print final torture test signature + { + std::vector buf(sig_len); + mem.read(sig_addr, sig_len, buf.data()); + + std::ofstream sigs(sig_file); + assert(sigs && "can't open signature file!"); + sigs << std::setfill('0') << std::hex; + + for (addr_t i = 0; i < sig_len; i += line_size) + { + for (addr_t j = line_size; j > 0; j--) + if (i+j <= sig_len) + sigs << std::setw(2) << (uint16_t)buf[i+j-1]; + else + sigs << std::setw(2) << (uint16_t)0; + sigs << '\n'; + } + + sigs.close(); + } + + stopped = true; +} + +void htif_t::clear_chunk(addr_t taddr, size_t len) +{ + char zeros[chunk_max_size()]; + memset(zeros, 0, chunk_max_size()); + + for (size_t pos = 0; pos < len; pos += chunk_max_size()) + write_chunk(taddr + pos, std::min(len - pos, chunk_max_size()), zeros); +} + +int htif_t::run() +{ + start(); + + auto enq_func = [](std::queue* q, uint64_t x) { q->push(x); }; + std::queue fromhost_queue; + std::function fromhost_callback = + std::bind(enq_func, &fromhost_queue, std::placeholders::_1); + + if (tohost_addr == 0) { + while (true) + idle(); + } + + while (!signal_exit && exitcode == 0) + { + uint64_t tohost; + + try { + if ((tohost = from_target(mem.read_uint64(tohost_addr))) != 0) + mem.write_uint64(tohost_addr, target_endian::zero); + } catch (mem_trap_t& t) { + bad_address("accessing tohost", t.get_tval()); + } + + try { + if (tohost != 0) { + command_t cmd(mem, tohost, fromhost_callback); + device_list.handle_command(cmd); + } else { + idle(); + } + + device_list.tick(); + } catch (mem_trap_t& t) { + std::stringstream tohost_hex; + tohost_hex << std::hex << tohost; + bad_address("host was accessing memory on behalf of target (tohost = 0x" + tohost_hex.str() + ")", t.get_tval()); + } + + try { + if (!fromhost_queue.empty() 
&& !mem.read_uint64(fromhost_addr)) { + mem.write_uint64(fromhost_addr, to_target(fromhost_queue.front())); + fromhost_queue.pop(); + } + } catch (mem_trap_t& t) { + bad_address("accessing fromhost", t.get_tval()); + } + } + + stop(); + + return exit_code(); +} + +bool htif_t::done() +{ + return stopped; +} + +int htif_t::exit_code() +{ + return exitcode >> 1; +} + +void htif_t::parse_arguments(int argc, char ** argv) +{ + optind = 0; // reset optind as HTIF may run getopt _after_ others + while (1) { + static struct option long_options[] = { HTIF_LONG_OPTIONS }; + int option_index = 0; + int c = getopt_long(argc, argv, "-h", long_options, &option_index); + + if (c == -1) break; + retry: + switch (c) { + case 'h': usage(argv[0]); + throw std::invalid_argument("User queried htif_t help text"); + case HTIF_LONG_OPTIONS_OPTIND: + if (optarg) dynamic_devices.push_back(new rfb_t(atoi(optarg))); + else dynamic_devices.push_back(new rfb_t); + break; + case HTIF_LONG_OPTIONS_OPTIND + 1: + // [TODO] Remove once disks are supported again + throw std::invalid_argument("--disk/+disk unsupported (use a ramdisk)"); + dynamic_devices.push_back(new disk_t(optarg)); + break; + case HTIF_LONG_OPTIONS_OPTIND + 2: + sig_file = optarg; + break; + case HTIF_LONG_OPTIONS_OPTIND + 3: + syscall_proxy.set_chroot(optarg); + break; + case HTIF_LONG_OPTIONS_OPTIND + 4: + payloads.push_back(optarg); + break; + case HTIF_LONG_OPTIONS_OPTIND + 5: + line_size = atoi(optarg); + + break; + case '?': + if (!opterr) + break; + throw std::invalid_argument("Unknown argument (did you mean to enable +permissive parsing?)"); + case 1: { + std::string arg = optarg; + if (arg == "+h" || arg == "+help") { + c = 'h'; + optarg = nullptr; + } + else if (arg == "+rfb") { + c = HTIF_LONG_OPTIONS_OPTIND; + optarg = nullptr; + } + else if (arg.find("+rfb=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND; + optarg = optarg + 5; + } + else if (arg.find("+disk=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 1; + optarg = optarg + 6; + } + else if (arg.find("+signature=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 2; + optarg = optarg + 11; + } + else if (arg.find("+chroot=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 3; + optarg = optarg + 8; + } + else if (arg.find("+payload=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 4; + optarg = optarg + 9; + } + else if(arg.find("+signature-granularity=")==0){ + c = HTIF_LONG_OPTIONS_OPTIND + 5; + optarg = optarg + 23; + } + else if (arg.find("+permissive-off") == 0) { + if (opterr) + throw std::invalid_argument("Found +permissive-off when not parsing permissively"); + opterr = 1; + break; + } + else if (arg.find("+permissive") == 0) { + if (!opterr) + throw std::invalid_argument("Found +permissive when already parsing permissively"); + opterr = 0; + break; + } + else { + if (!opterr) + break; + else { + optind--; + goto done_processing; + } + } + goto retry; + } + } + } + +done_processing: + while (optind < argc) + targs.push_back(argv[optind++]); + if (!targs.size()) { + usage(argv[0]); + throw std::invalid_argument("No binary specified (Did you forget it? Did you forget '+permissive-off' if running with +permissive?)"); + } +} + +void htif_t::register_devices() +{ + device_list.register_device(&syscall_proxy); + device_list.register_device(&bcd); + for (auto d : dynamic_devices) + device_list.register_device(d); +} + +void htif_t::usage(const char * program_name) +{ + printf("Usage: %s [EMULATOR OPTION]... [VERILOG PLUSARG]... [HOST OPTION]... 
BINARY [TARGET OPTION]...\n ", + program_name); + fputs("\ +Run a BINARY on the Rocket Chip emulator.\n\ +\n\ +Mandatory arguments to long options are mandatory for short options too.\n\ +\n\ +EMULATOR OPTIONS\n\ + Consult emulator.cc if using Verilator or VCS documentation if using VCS\n\ + for available options.\n\ +EMUALTOR VERILOG PLUSARGS\n\ + Consult generated-src*/*.plusArgs for available options\n\ +", stdout); + fputs("\n" HTIF_USAGE_OPTIONS, stdout); +} diff --git a/vendor/riscv-isa-sim/fesvr/htif.h b/vendor/riscv-isa-sim/fesvr/htif.h new file mode 100644 index 00000000..3cee25f7 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif.h @@ -0,0 +1,156 @@ +// See LICENSE for license details. + +#ifndef __HTIF_H +#define __HTIF_H + +#include "memif.h" +#include "syscall.h" +#include "device.h" +#include "byteorder.h" +#include +#include +#include +#include + +class htif_t : public chunked_memif_t +{ + public: + htif_t(); + htif_t(int argc, char** argv); + htif_t(const std::vector& args); + virtual ~htif_t(); + + virtual void start(); + virtual void stop(); + + int run(); + bool done(); + int exit_code(); + + virtual memif_t& memif() { return mem; } + + template inline T from_target(target_endian n) const + { +#ifdef RISCV_ENABLE_DUAL_ENDIAN + memif_endianness_t endianness = get_target_endianness(); + assert(endianness == memif_endianness_little || endianness == memif_endianness_big); + + return endianness == memif_endianness_big? n.from_be() : n.from_le(); +#else + return n.from_le(); +#endif + } + + template inline target_endian to_target(T n) const + { +#ifdef RISCV_ENABLE_DUAL_ENDIAN + memif_endianness_t endianness = get_target_endianness(); + assert(endianness == memif_endianness_little || endianness == memif_endianness_big); + + return endianness == memif_endianness_big? 
target_endian::to_be(n) : target_endian::to_le(n); +#else + return target_endian::to_le(n); +#endif + } + + protected: + virtual void reset() = 0; + + virtual void read_chunk(addr_t taddr, size_t len, void* dst) = 0; + virtual void write_chunk(addr_t taddr, size_t len, const void* src) = 0; + virtual void clear_chunk(addr_t taddr, size_t len); + + virtual size_t chunk_align() = 0; + virtual size_t chunk_max_size() = 0; + + virtual std::map load_payload(const std::string& payload, reg_t* entry); + virtual void load_program(); + virtual void idle() {} + + const std::vector& host_args() { return hargs; } + + reg_t get_entry_point() { return entry; } + + // indicates that the initial program load can skip writing this address + // range to memory, because it has already been loaded through a sideband + virtual bool is_address_preloaded(addr_t taddr, size_t len) { return false; } + + // Given an address, return symbol from addr2symbol map + const char* get_symbol(uint64_t addr); + + private: + void parse_arguments(int argc, char ** argv); + void register_devices(); + void usage(const char * program_name); + + memif_t mem; + reg_t entry; + bool writezeros; + std::vector hargs; + std::vector targs; + std::string sig_file; + unsigned int line_size; + addr_t sig_addr; // torture + addr_t sig_len; // torture + addr_t tohost_addr; + addr_t fromhost_addr; + int exitcode; + bool stopped; + + device_list_t device_list; + syscall_t syscall_proxy; + bcd_t bcd; + std::vector dynamic_devices; + std::vector payloads; + + const std::vector& target_args() { return targs; } + + std::map addr2symbol; + + friend class memif_t; + friend class syscall_t; +}; + +/* Alignment guide for emulator.cc options: + -x, --long-option Description with max 80 characters --------------->\n\ + +plus-arg-equivalent\n\ + */ +#define HTIF_USAGE_OPTIONS \ +"HOST OPTIONS\n\ + -h, --help Display this help and exit\n\ + +h, +help\n\ + +permissive The host will ignore any unparsed options up until\n\ + +permissive-off (Only needed for VCS)\n\ + +permissive-off Stop ignoring options. This is mandatory if using\n\ + +permissive (Only needed for VCS)\n\ + --rfb=DISPLAY Add new remote frame buffer on display DISPLAY\n\ + +rfb=DISPLAY to be accessible on 5900 + DISPLAY (default = 0)\n\ + --signature=FILE Write torture test signature to FILE\n\ + +signature=FILE\n\ + --signature-granularity=VAL Size of each line in signature.\n\ + +signature-granularity=VAL\n\ + --chroot=PATH Use PATH as location of syscall-servicing binaries\n\ + +chroot=PATH\n\ + --payload=PATH Load PATH memory as an additional ELF payload\n\ + +payload=PATH\n\ +\n\ +HOST OPTIONS (currently unsupported)\n\ + --disk=DISK Add DISK device. 
Use a ramdisk since this isn't\n\ + +disk=DISK supported\n\ +\n\ +TARGET (RISC-V BINARY) OPTIONS\n\ + These are the options passed to the program executing on the emulated RISC-V\n\ + microprocessor.\n" + +#define HTIF_LONG_OPTIONS_OPTIND 1024 +#define HTIF_LONG_OPTIONS \ +{"help", no_argument, 0, 'h' }, \ +{"rfb", optional_argument, 0, HTIF_LONG_OPTIONS_OPTIND }, \ +{"disk", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 1 }, \ +{"signature", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 2 }, \ +{"chroot", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 3 }, \ +{"payload", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 4 }, \ +{"signature-granularity", optional_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 5 }, \ +{0, 0, 0, 0} + +#endif // __HTIF_H diff --git a/vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc new file mode 100644 index 00000000..e4811b3b --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc @@ -0,0 +1,76 @@ +// See LICENSE for license details. + +#include +#include +#include "htif_hexwriter.h" + +htif_hexwriter_t::htif_hexwriter_t(size_t b, size_t w, size_t d) + : base(b), width(w), depth(d) +{ +} + +void htif_hexwriter_t::read_chunk(addr_t taddr, size_t len, void* vdst) +{ + taddr -= base; + + assert(len % chunk_align() == 0); + assert(taddr < width*depth); + assert(taddr+len <= width*depth); + + uint8_t* dst = (uint8_t*)vdst; + while(len) + { + if(mem[taddr/width].size() == 0) + mem[taddr/width].resize(width,0); + + for(size_t j = 0; j < width; j++) + dst[j] = mem[taddr/width][j]; + + len -= width; + taddr += width; + dst += width; + } +} + +void htif_hexwriter_t::write_chunk(addr_t taddr, size_t len, const void* vsrc) +{ + taddr -= base; + + assert(len % chunk_align() == 0); + assert(taddr < width*depth); + assert(taddr+len <= width*depth); + + const uint8_t* src = (const uint8_t*)vsrc; + while(len) + { + if(mem[taddr/width].size() == 0) + mem[taddr/width].resize(width,0); + + for(size_t j = 0; j < width; j++) + mem[taddr/width][j] = src[j]; + + len -= width; + taddr += width; + } +} + +std::ostream& operator<< (std::ostream& o, const htif_hexwriter_t& h) +{ + std::ios_base::fmtflags flags = o.setf(std::ios::hex,std::ios::basefield); + + for(size_t addr = 0; addr < h.depth; addr++) + { + std::map >::const_iterator i = h.mem.find(addr); + if(i == h.mem.end()) + for(size_t j = 0; j < h.width; j++) + o << "00"; + else + for(size_t j = 0; j < h.width; j++) + o << ((i->second[h.width-j-1] >> 4) & 0xF) << (i->second[h.width-j-1] & 0xF); + o << std::endl; + } + + o.setf(flags); + + return o; +} diff --git a/vendor/riscv-isa-sim/fesvr/htif_hexwriter.h b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.h new file mode 100644 index 00000000..72561662 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.h @@ -0,0 +1,32 @@ +// See LICENSE for license details. 
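+
+// htif_hexwriter_t is the chunked_memif_t backend behind elf2hex: it
+// collects writes into width-byte rows, and operator<< dumps depth rows
+// as hex text, one row per line, emitting zeros for rows never written.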
+ +#ifndef __HTIF_HEXWRITER_H +#define __HTIF_HEXWRITER_H + +#include +#include +#include +#include "memif.h" + +class htif_hexwriter_t : public chunked_memif_t +{ +public: + htif_hexwriter_t(size_t b, size_t w, size_t d); + +protected: + size_t base; + size_t width; + size_t depth; + std::map > mem; + + void read_chunk(addr_t taddr, size_t len, void* dst); + void write_chunk(addr_t taddr, size_t len, const void* src); + void clear_chunk(addr_t taddr, size_t len) {} + + size_t chunk_max_size() { return width; } + size_t chunk_align() { return width; } + + friend std::ostream& operator<< (std::ostream&, const htif_hexwriter_t&); +}; + +#endif // __HTIF_HEXWRITER_H diff --git a/vendor/riscv-isa-sim/fesvr/htif_pthread.cc b/vendor/riscv-isa-sim/fesvr/htif_pthread.cc new file mode 100644 index 00000000..b9e3832b --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_pthread.cc @@ -0,0 +1,66 @@ +// See LICENSE for license details. + +#include "htif_pthread.h" +#include +#include + +void htif_pthread_t::thread_main(void* arg) +{ + htif_pthread_t* htif = static_cast(arg); + htif->run(); + while (true) + htif->target->switch_to(); +} + +htif_pthread_t::htif_pthread_t(int argc, char** argv) + : htif_t(argc, argv) +{ + target = context_t::current(); + host.init(thread_main, this); +} + +htif_pthread_t::~htif_pthread_t() +{ +} + +ssize_t htif_pthread_t::read(void* buf, size_t max_size) +{ + while (th_data.size() == 0) + target->switch_to(); + + size_t s = std::min(max_size, th_data.size()); + std::copy(th_data.begin(), th_data.begin() + s, (char*)buf); + th_data.erase(th_data.begin(), th_data.begin() + s); + + return s; +} + +ssize_t htif_pthread_t::write(const void* buf, size_t size) +{ + ht_data.insert(ht_data.end(), (const char*)buf, (const char*)buf + size); + return size; +} + +void htif_pthread_t::send(const void* buf, size_t size) +{ + th_data.insert(th_data.end(), (const char*)buf, (const char*)buf + size); +} + +void htif_pthread_t::recv(void* buf, size_t size) +{ + while (!this->recv_nonblocking(buf, size)) + ; +} + +bool htif_pthread_t::recv_nonblocking(void* buf, size_t size) +{ + if (ht_data.size() < size) + { + host.switch_to(); + return false; + } + + std::copy(ht_data.begin(), ht_data.begin() + size, (char*)buf); + ht_data.erase(ht_data.begin(), ht_data.begin() + size); + return true; +} diff --git a/vendor/riscv-isa-sim/fesvr/htif_pthread.h b/vendor/riscv-isa-sim/fesvr/htif_pthread.h new file mode 100644 index 00000000..c00c3823 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_pthread.h @@ -0,0 +1,38 @@ +// See LICENSE for license details. 
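+
+// htif_pthread_t runs the HTIF host loop on its own context_t:
+// read()/write() shuttle bytes between the host and target contexts
+// through the two deques below, blocking by cooperatively switching
+// to the other side until data is available.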
+ +#ifndef _HTIF_PTHREAD_H +#define _HTIF_PTHREAD_H + +#include "htif.h" +#include "context.h" +#include + +class htif_pthread_t : public htif_t +{ + public: + htif_pthread_t(int argc, char** argv); + virtual ~htif_pthread_t(); + + // target inteface + void send(const void* buf, size_t size); + void recv(void* buf, size_t size); + bool recv_nonblocking(void* buf, size_t size); + + protected: + // host interface + virtual ssize_t read(void* buf, size_t max_size); + virtual ssize_t write(const void* buf, size_t size); + + virtual size_t chunk_align() { return 64; } + virtual size_t chunk_max_size() { return 1024; } + + private: + context_t host; + context_t* target; + std::deque th_data; + std::deque ht_data; + + static void thread_main(void* htif); +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/memif.cc b/vendor/riscv-isa-sim/fesvr/memif.cc new file mode 100644 index 00000000..e56bd943 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/memif.cc @@ -0,0 +1,183 @@ +// See LICENSE for license details. + +#include +#include +#include +#include +#include "memif.h" + +void memif_t::read(addr_t addr, size_t len, void* bytes) +{ + size_t align = cmemif->chunk_align(); + if (len && (addr & (align-1))) + { + size_t this_len = std::min(len, align - size_t(addr & (align-1))); + uint8_t chunk[align]; + + cmemif->read_chunk(addr & ~(align-1), align, chunk); + memcpy(bytes, chunk + (addr & (align-1)), this_len); + + bytes = (char*)bytes + this_len; + addr += this_len; + len -= this_len; + } + + if (len & (align-1)) + { + size_t this_len = len & (align-1); + size_t start = len - this_len; + uint8_t chunk[align]; + + cmemif->read_chunk(addr + start, align, chunk); + memcpy((char*)bytes + start, chunk, this_len); + + len -= this_len; + } + + // now we're aligned + for (size_t pos = 0; pos < len; pos += cmemif->chunk_max_size()) + cmemif->read_chunk(addr + pos, std::min(cmemif->chunk_max_size(), len - pos), (char*)bytes + pos); +} + +void memif_t::write(addr_t addr, size_t len, const void* bytes) +{ + size_t align = cmemif->chunk_align(); + if (len && (addr & (align-1))) + { + size_t this_len = std::min(len, align - size_t(addr & (align-1))); + uint8_t chunk[align]; + + cmemif->read_chunk(addr & ~(align-1), align, chunk); + memcpy(chunk + (addr & (align-1)), bytes, this_len); + cmemif->write_chunk(addr & ~(align-1), align, chunk); + + bytes = (char*)bytes + this_len; + addr += this_len; + len -= this_len; + } + + if (len & (align-1)) + { + size_t this_len = len & (align-1); + size_t start = len - this_len; + uint8_t chunk[align]; + + cmemif->read_chunk(addr + start, align, chunk); + memcpy(chunk, (char*)bytes + start, this_len); + cmemif->write_chunk(addr + start, align, chunk); + + len -= this_len; + } + + // now we're aligned + bool all_zero = len != 0; + for (size_t i = 0; i < len; i++) + all_zero &= ((const char*)bytes)[i] == 0; + + if (all_zero) { + cmemif->clear_chunk(addr, len); + } else { + size_t max_chunk = cmemif->chunk_max_size(); + for (size_t pos = 0; pos < len; pos += max_chunk) + cmemif->write_chunk(addr + pos, std::min(max_chunk, len - pos), (char*)bytes + pos); + } +} + +#define MEMIF_READ_FUNC \ + if(addr & (sizeof(val)-1)) \ + throw std::runtime_error("misaligned address"); \ + this->read(addr, sizeof(val), &val); \ + return val + +#define MEMIF_WRITE_FUNC \ + if(addr & (sizeof(val)-1)) \ + throw std::runtime_error("misaligned address"); \ + this->write(addr, sizeof(val), &val) + +target_endian memif_t::read_uint8(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + 
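+// The typed accessors below are all stamped out from the same pattern:
+// reject misaligned addresses, then forward to the byte-array read() or
+// write() above, which handle chunk alignment. For example,
+// read_uint32(0x1002) throws "misaligned address" (0x1002 & 3 != 0)
+// rather than issuing a split access.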
+target_endian memif_t::read_int8(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint8(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int8(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +target_endian memif_t::read_uint16(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +target_endian memif_t::read_int16(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint16(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int16(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +target_endian memif_t::read_uint32(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +target_endian memif_t::read_int32(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint32(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int32(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +target_endian memif_t::read_uint64(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +target_endian memif_t::read_int64(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint64(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int64(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} diff --git a/vendor/riscv-isa-sim/fesvr/memif.h b/vendor/riscv-isa-sim/fesvr/memif.h new file mode 100644 index 00000000..001c4254 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/memif.h @@ -0,0 +1,82 @@ +// See LICENSE for license details. + +#ifndef __MEMIF_H +#define __MEMIF_H + +#include +#include +#include "byteorder.h" + +typedef uint64_t reg_t; +typedef int64_t sreg_t; +typedef reg_t addr_t; + +typedef enum { + memif_endianness_undecided, + memif_endianness_little, + memif_endianness_big +} memif_endianness_t; + +class chunked_memif_t +{ +public: + virtual void read_chunk(addr_t taddr, size_t len, void* dst) = 0; + virtual void write_chunk(addr_t taddr, size_t len, const void* src) = 0; + virtual void clear_chunk(addr_t taddr, size_t len) = 0; + + virtual size_t chunk_align() = 0; + virtual size_t chunk_max_size() = 0; + + virtual void set_target_endianness(memif_endianness_t endianness) {} + virtual memif_endianness_t get_target_endianness() const { + return memif_endianness_undecided; + } +}; + +class memif_t +{ +public: + memif_t(chunked_memif_t* _cmemif) : cmemif(_cmemif) {} + virtual ~memif_t(){} + + // read and write byte arrays + virtual void read(addr_t addr, size_t len, void* bytes); + virtual void write(addr_t addr, size_t len, const void* bytes); + + // read and write 8-bit words + virtual target_endian read_uint8(addr_t addr); + virtual target_endian read_int8(addr_t addr); + virtual void write_uint8(addr_t addr, target_endian val); + virtual void write_int8(addr_t addr, target_endian val); + + // read and write 16-bit words + virtual target_endian read_uint16(addr_t addr); + virtual target_endian read_int16(addr_t addr); + virtual void write_uint16(addr_t addr, target_endian val); + virtual void write_int16(addr_t addr, target_endian val); + + // read and write 32-bit words + virtual target_endian read_uint32(addr_t addr); + virtual target_endian read_int32(addr_t addr); + virtual void write_uint32(addr_t addr, target_endian val); + virtual void write_int32(addr_t addr, target_endian val); + + // read and write 64-bit words + virtual target_endian read_uint64(addr_t addr); + virtual target_endian read_int64(addr_t addr); + 
virtual void write_uint64(addr_t addr, target_endian val); + virtual void write_int64(addr_t addr, target_endian val); + + // endianness + virtual void set_target_endianness(memif_endianness_t endianness) { + cmemif->set_target_endianness(endianness); + } + virtual memif_endianness_t get_target_endianness() const { + return cmemif->get_target_endianness(); + } + +protected: + chunked_memif_t* cmemif; +}; + +#endif // __MEMIF_H diff --git a/vendor/riscv-isa-sim/fesvr/option_parser.cc b/vendor/riscv-isa-sim/fesvr/option_parser.cc new file mode 100644 index 00000000..72daec40 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/option_parser.cc @@ -0,0 +1,51 @@ +// See LICENSE for license details. + +#include "option_parser.h" +#include +#include +#include +#include + +void option_parser_t::option(char c, const char* s, int arg, std::function action) +{ + opts.push_back(option_t(c, s, arg, action)); +} + +const char* const* option_parser_t::parse(const char* const* argv0) +{ + assert(argv0); + const char* const* argv = argv0 + 1; + for (const char* opt; (opt = *argv) != NULL && opt[0] == '-'; argv++) + { + bool found = false; + for (auto it = opts.begin(); !found && it != opts.end(); it++) + { + size_t slen = it->str ? strlen(it->str) : 0; + bool chr_match = opt[1] != '-' && it->chr && opt[1] == it->chr; + bool str_match = opt[1] == '-' && slen && strncmp(opt+2, it->str, slen) == 0; + if (chr_match || (str_match && (opt[2+slen] == '=' || opt[2+slen] == '\0'))) + { + const char* optarg = + chr_match ? (opt[2] ? &opt[2] : NULL) : + opt[2+slen] ? &opt[3+slen] : + it->arg ? *(++argv) : NULL; + if (optarg && !it->arg) + error("no argument allowed for option", *argv0, opt); + if (!optarg && it->arg) + error("argument required for option", *argv0, opt); + it->func(optarg); + found = true; + } + } + if (!found) + error("unrecognized option", *argv0, opt); + } + return argv; +} + +void option_parser_t::error(const char* msg, const char* argv0, const char* arg) +{ + fprintf(stderr, "%s: %s %s\n", argv0, msg, arg ? arg : ""); + if (helpmsg) helpmsg(); + exit(1); +} diff --git a/vendor/riscv-isa-sim/fesvr/option_parser.h b/vendor/riscv-isa-sim/fesvr/option_parser.h new file mode 100644 index 00000000..b2cb8edf --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/option_parser.h @@ -0,0 +1,31 @@ +// See LICENSE for license details. 
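+
+// Minimal usage sketch (option names illustrative). Handlers are
+// std::function callbacks; parse() returns a pointer to the first
+// non-option argument:
+//
+//   option_parser_t parser;
+//   parser.option('m', "mem", 1, [&](const char* s){ mem_mb = atoi(s); });
+//   parser.option(0, "debug", 0, [&](const char* s){ debug = true; });
+//   const char* const* rest = parser.parse(argv);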
+ +#ifndef _OPTION_PARSER_H +#define _OPTION_PARSER_H + +#include +#include + +class option_parser_t +{ + public: + option_parser_t() : helpmsg(0) {} + void help(void (*helpm)(void)) { helpmsg = helpm; } + void option(char c, const char* s, int arg, std::function action); + const char* const* parse(const char* const* argv0); + private: + struct option_t + { + char chr; + const char* str; + int arg; + std::function func; + option_t(char chr, const char* str, int arg, std::function func) + : chr(chr), str(str), arg(arg), func(func) {} + }; + std::vector opts; + void (*helpmsg)(void); + void error(const char* msg, const char* argv0, const char* arg); +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/rfb.cc b/vendor/riscv-isa-sim/fesvr/rfb.cc new file mode 100644 index 00000000..2594a1b8 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/rfb.cc @@ -0,0 +1,230 @@ +#include "rfb.h" +#include "memif.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +using namespace std::placeholders; + +rfb_t::rfb_t(int display) + : sockfd(-1), afd(-1), + memif(0), addr(0), width(0), height(0), bpp(0), display(display), + thread(pthread_self()), fb1(0), fb2(0), read_pos(0), + lock(PTHREAD_MUTEX_INITIALIZER) +{ + register_command(0, std::bind(&rfb_t::handle_configure, this, _1), "configure"); + register_command(1, std::bind(&rfb_t::handle_set_address, this, _1), "set_address"); +} + +void* rfb_thread_main(void* arg) +{ + ((rfb_t*)arg)->thread_main(); + return 0; +} + +void rfb_t::thread_main() +{ + pthread_mutex_lock(&lock); + + int port = 5900 + display; + sockfd = socket(PF_INET, SOCK_STREAM, 0); + if (sockfd < 0) + throw std::runtime_error("could not acquire tcp socket"); + + struct sockaddr_in saddr, caddr; + saddr.sin_family = AF_INET; + saddr.sin_addr.s_addr = INADDR_ANY; + saddr.sin_port = htons(port); + if (bind(sockfd, (struct sockaddr*)&saddr, sizeof(saddr)) < 0) + throw std::runtime_error("could not bind to port " + std::to_string(port)); + if (listen(sockfd, 0) < 0) + throw std::runtime_error("could not listen on port " + std::to_string(port)); + + socklen_t clen = sizeof(caddr); + afd = accept(sockfd, (struct sockaddr*)&caddr, &clen); + if (afd < 0) + throw std::runtime_error("could not accept connection"); + + std::string version = "RFB 003.003\n"; + write(version); + if (read() != version) + throw std::runtime_error("bad client version"); + + write(str(uint32_t(htonl(1)))); + + read(); // clientinit + + std::string serverinit; + serverinit += str(uint16_t(htons(width))); + serverinit += str(uint16_t(htons(height))); + serverinit += pixel_format(); + std::string name = "RISC-V"; + serverinit += str(uint32_t(htonl(name.length()))); + serverinit += name; + write(serverinit); + + pthread_mutex_unlock(&lock); + + while (memif == NULL) + sched_yield(); + + while (memif != NULL) + { + std::string s = read(); + if (s.length() < 4) + break; //throw std::runtime_error("bad command"); + + switch (s[0]) + { + case 0: set_pixel_format(s); break; + case 2: set_encodings(s); break; + case 3: break; + } + } + + pthread_mutex_lock(&lock); + close(afd); + close(sockfd); + afd = -1; + sockfd = -1; + pthread_mutex_unlock(&lock); + + thread_main(); +} + +rfb_t::~rfb_t() +{ + memif = 0; + if (!pthread_equal(pthread_self(), thread)) + pthread_join(thread, 0); + delete [] fb1; + delete [] fb2; +} + +void rfb_t::set_encodings(const std::string& s) +{ + uint16_t n = htons(*(uint16_t*)&s[2]); + for (size_t b = s.length(); b < 4U+4U*n; b += read().length()); +} + +void 
rfb_t::set_pixel_format(const std::string& s) +{ + if (s.length() != 20 || s.substr(4, 16) != pixel_format()) + throw std::runtime_error("bad pixel format"); +} + +void rfb_t::fb_update(const std::string& s) +{ + std::string u; + u += str(uint8_t(0)); + u += str(uint8_t(0)); + u += str(uint16_t(htons(1))); + u += str(uint16_t(htons(0))); + u += str(uint16_t(htons(0))); + u += str(uint16_t(htons(width))); + u += str(uint16_t(htons(height))); + u += str(uint32_t(htonl(0))); + u += std::string((char*)fb1, fb_bytes()); + + try + { + write(u); + } + catch (std::runtime_error& e) + { + } +} + +void rfb_t::tick() +{ + if (fb_bytes() == 0 || memif == NULL) + return; + + memif->read(addr + read_pos, FB_ALIGN, const_cast(fb2 + read_pos)); + read_pos = (read_pos + FB_ALIGN) % fb_bytes(); + if (read_pos == 0) + { + std::swap(fb1, fb2); + if (pthread_mutex_trylock(&lock) == 0) + { + fb_update(""); + pthread_mutex_unlock(&lock); + } + } +} + +std::string rfb_t::pixel_format() +{ + int red_bits = 8, green_bits = 8, blue_bits = 8; + int bpp = red_bits + green_bits + blue_bits; + while (bpp & (bpp-1)) bpp++; + + std::string fmt; + fmt += str(uint8_t(bpp)); + fmt += str(uint8_t(red_bits + green_bits + blue_bits)); + fmt += str(uint8_t(0)); // little-endian + fmt += str(uint8_t(1)); // true color + fmt += str(uint16_t(htons((1<> 16; + + bpp = cmd.payload() >> 32; + if (bpp != 32) + throw std::runtime_error("rfb requires 32 bpp true color"); + + if (fb_bytes() % FB_ALIGN != 0) + throw std::runtime_error("rfb size must be a multiple of " + std::to_string(FB_ALIGN)); + + fb1 = new char[fb_bytes()]; + fb2 = new char[fb_bytes()]; + if (pthread_create(&thread, 0, rfb_thread_main, this)) + throw std::runtime_error("could not create thread"); + cmd.respond(1); +} + +void rfb_t::handle_set_address(command_t cmd) +{ + addr = cmd.payload(); + if (addr % FB_ALIGN != 0) + throw std::runtime_error("rfb address must be " + std::to_string(FB_ALIGN) + "-byte aligned"); + memif = &cmd.memif(); + cmd.respond(1); +} diff --git a/vendor/riscv-isa-sim/fesvr/rfb.h b/vendor/riscv-isa-sim/fesvr/rfb.h new file mode 100644 index 00000000..263663a2 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/rfb.h @@ -0,0 +1,53 @@ +#ifndef _RFB_H +#define _RFB_H + +#include "device.h" +#include "memif.h" +#include + +// remote frame buffer +class rfb_t : public device_t +{ + public: + rfb_t(int display = 0); + ~rfb_t(); + void tick(); + std::string name() { return "RISC-V"; } + const char* identity() { return "rfb"; } + + private: + template + std::string str(T x) + { + return std::string((char*)&x, sizeof(x)); + } + size_t fb_bytes() { return size_t(width) * height * bpp/8; } + void thread_main(); + friend void* rfb_thread_main(void*); + std::string pixel_format(); + void fb_update(const std::string& s); + void set_encodings(const std::string& s); + void set_pixel_format(const std::string& s); + void write(const std::string& s); + std::string read(); + void handle_configure(command_t cmd); + void handle_set_address(command_t cmd); + + int sockfd; + int afd; + memif_t* memif; + reg_t addr; + uint16_t width; + uint16_t height; + uint16_t bpp; + int display; + pthread_t thread; + volatile char* volatile fb1; + volatile char* volatile fb2; + size_t read_pos; + pthread_mutex_t lock; + + static const int FB_ALIGN = 256; +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/syscall.cc b/vendor/riscv-isa-sim/fesvr/syscall.cc new file mode 100644 index 00000000..ab7fc3b4 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/syscall.cc @@ -0,0 +1,502 @@ +// See 
diff --git a/vendor/riscv-isa-sim/fesvr/syscall.cc b/vendor/riscv-isa-sim/fesvr/syscall.cc
new file mode 100644
index 00000000..ab7fc3b4
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/syscall.cc
@@ -0,0 +1,502 @@
+// See LICENSE for license details.
+
+#include "syscall.h"
+#include "htif.h"
+#include "byteorder.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/stat.h>
+#include <vector>
+#include <iostream>
+using namespace std::placeholders;
+
+#define RISCV_AT_FDCWD -100
+
+struct riscv_stat
+{
+  target_endian<uint64_t> dev;
+  target_endian<uint64_t> ino;
+  target_endian<uint32_t> mode;
+  target_endian<uint32_t> nlink;
+  target_endian<uint32_t> uid;
+  target_endian<uint32_t> gid;
+  target_endian<uint64_t> rdev;
+  target_endian<uint64_t> __pad1;
+  target_endian<uint64_t> size;
+  target_endian<uint32_t> blksize;
+  target_endian<uint32_t> __pad2;
+  target_endian<uint64_t> blocks;
+  target_endian<uint64_t> atime;
+  target_endian<uint64_t> __pad3;
+  target_endian<uint64_t> mtime;
+  target_endian<uint64_t> __pad4;
+  target_endian<uint64_t> ctime;
+  target_endian<uint64_t> __pad5;
+  target_endian<uint32_t> __unused4;
+  target_endian<uint32_t> __unused5;
+
+  riscv_stat(const struct stat& s, htif_t* htif)
+    : dev(htif->to_target<uint64_t>(s.st_dev)),
+      ino(htif->to_target<uint64_t>(s.st_ino)),
+      mode(htif->to_target<uint32_t>(s.st_mode)),
+      nlink(htif->to_target<uint32_t>(s.st_nlink)),
+      uid(htif->to_target<uint32_t>(s.st_uid)),
+      gid(htif->to_target<uint32_t>(s.st_gid)),
+      rdev(htif->to_target<uint64_t>(s.st_rdev)), __pad1(),
+      size(htif->to_target<uint64_t>(s.st_size)),
+      blksize(htif->to_target<uint32_t>(s.st_blksize)), __pad2(),
+      blocks(htif->to_target<uint64_t>(s.st_blocks)),
+      atime(htif->to_target<uint64_t>(s.st_atime)), __pad3(),
+      mtime(htif->to_target<uint64_t>(s.st_mtime)), __pad4(),
+      ctime(htif->to_target<uint64_t>(s.st_ctime)), __pad5(),
+      __unused4(), __unused5() {}
+};
+
+
+struct riscv_statx_timestamp {
+  target_endian<int64_t> tv_sec;
+  target_endian<uint32_t> tv_nsec;
+  target_endian<int32_t> __reserved;
+};
+
+#ifdef HAVE_STATX
+struct riscv_statx
+{
+  target_endian<uint32_t> mask;
+  target_endian<uint32_t> blksize;
+  target_endian<uint64_t> attributes;
+  target_endian<uint32_t> nlink;
+  target_endian<uint32_t> uid;
+  target_endian<uint32_t> gid;
+  target_endian<uint16_t> mode;
+  target_endian<uint16_t> __spare0[1];
+  target_endian<uint64_t> ino;
+  target_endian<uint64_t> size;
+  target_endian<uint64_t> blocks;
+  target_endian<uint64_t> attributes_mask;
+  struct riscv_statx_timestamp atime;
+  struct riscv_statx_timestamp btime;
+  struct riscv_statx_timestamp ctime;
+  struct riscv_statx_timestamp mtime;
+  target_endian<uint32_t> rdev_major;
+  target_endian<uint32_t> rdev_minor;
+  target_endian<uint32_t> dev_major;
+  target_endian<uint32_t> dev_minor;
+#ifdef HAVE_STATX_MNT_ID
+  target_endian<uint64_t> mnt_id;
+  target_endian<uint64_t> __spare2;
+  target_endian<uint64_t> __spare3[12];
+#else
+  target_endian<uint64_t> __spare2[14];
+#endif
+
+  riscv_statx(const struct statx& s, htif_t* htif)
+    : mask(htif->to_target<uint32_t>(s.stx_mask)),
+      blksize(htif->to_target<uint32_t>(s.stx_blksize)),
+      attributes(htif->to_target<uint64_t>(s.stx_attributes)),
+      nlink(htif->to_target<uint32_t>(s.stx_nlink)),
+      uid(htif->to_target<uint32_t>(s.stx_uid)),
+      gid(htif->to_target<uint32_t>(s.stx_gid)),
+      mode(htif->to_target<uint16_t>(s.stx_mode)), __spare0(),
+      ino(htif->to_target<uint64_t>(s.stx_ino)),
+      size(htif->to_target<uint64_t>(s.stx_size)),
+      blocks(htif->to_target<uint64_t>(s.stx_blocks)),
+      attributes_mask(htif->to_target<uint64_t>(s.stx_attributes_mask)),
+      atime {
+        htif->to_target<int64_t>(s.stx_atime.tv_sec),
+        htif->to_target<uint32_t>(s.stx_atime.tv_nsec)
+      },
+      btime {
+        htif->to_target<int64_t>(s.stx_btime.tv_sec),
+        htif->to_target<uint32_t>(s.stx_btime.tv_nsec)
+      },
+      ctime {
+        htif->to_target<int64_t>(s.stx_ctime.tv_sec),
+        htif->to_target<uint32_t>(s.stx_ctime.tv_nsec)
+      },
+      mtime {
+        htif->to_target<int64_t>(s.stx_mtime.tv_sec),
+        htif->to_target<uint32_t>(s.stx_mtime.tv_nsec)
+      },
+      rdev_major(htif->to_target<uint32_t>(s.stx_rdev_major)),
+      rdev_minor(htif->to_target<uint32_t>(s.stx_rdev_minor)),
+      dev_major(htif->to_target<uint32_t>(s.stx_dev_major)),
+      dev_minor(htif->to_target<uint32_t>(s.stx_dev_minor)),
+#ifdef HAVE_STATX_MNT_ID
+      mnt_id(htif->to_target<uint64_t>(s.stx_mnt_id)),
+      __spare2(), __spare3()
+#else
+      __spare2()
+#endif
+  {}
+};
+#endif
+
+syscall_t::syscall_t(htif_t* htif)
+  : htif(htif), memif(&htif->memif()), table(2048)
+{
+  table[17] = 
&syscall_t::sys_getcwd; + table[25] = &syscall_t::sys_fcntl; + table[34] = &syscall_t::sys_mkdirat; + table[35] = &syscall_t::sys_unlinkat; + table[37] = &syscall_t::sys_linkat; + table[38] = &syscall_t::sys_renameat; + table[46] = &syscall_t::sys_ftruncate; + table[48] = &syscall_t::sys_faccessat; + table[49] = &syscall_t::sys_chdir; + table[56] = &syscall_t::sys_openat; + table[57] = &syscall_t::sys_close; + table[62] = &syscall_t::sys_lseek; + table[63] = &syscall_t::sys_read; + table[64] = &syscall_t::sys_write; + table[67] = &syscall_t::sys_pread; + table[68] = &syscall_t::sys_pwrite; + table[79] = &syscall_t::sys_fstatat; + table[80] = &syscall_t::sys_fstat; + table[93] = &syscall_t::sys_exit; + table[291] = &syscall_t::sys_statx; + table[1039] = &syscall_t::sys_lstat; + table[2011] = &syscall_t::sys_getmainvars; + + register_command(0, std::bind(&syscall_t::handle_syscall, this, _1), "syscall"); + + int stdin_fd = dup(0), stdout_fd0 = dup(1), stdout_fd1 = dup(1); + if (stdin_fd < 0 || stdout_fd0 < 0 || stdout_fd1 < 0) + throw std::runtime_error("could not dup stdin/stdout"); + + fds.alloc(stdin_fd); // stdin -> stdin + fds.alloc(stdout_fd0); // stdout -> stdout + fds.alloc(stdout_fd1); // stderr -> stdout +} + +std::string syscall_t::do_chroot(const char* fn) +{ + if (!chroot.empty() && *fn == '/') + return chroot + fn; + return fn; +} + +std::string syscall_t::undo_chroot(const char* fn) +{ + if (chroot.empty()) + return fn; + if (strncmp(fn, chroot.c_str(), chroot.size()) == 0 + && (chroot.back() == '/' || fn[chroot.size()] == '/')) + return fn + chroot.size() - (chroot.back() == '/'); + return "/"; +} + +void syscall_t::handle_syscall(command_t cmd) +{ + if (cmd.payload() & 1) // test pass/fail + { + htif->exitcode = cmd.payload(); + if (htif->exit_code()) + std::cerr << "*** FAILED *** (tohost = " << htif->exit_code() << ")" << std::endl; + return; + } + else // proxied system call + dispatch(cmd.payload()); + + cmd.respond(1); +} + +reg_t syscall_t::sys_exit(reg_t code, reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + htif->exitcode = code << 1 | 1; + return 0; +} + +static reg_t sysret_errno(sreg_t ret) +{ + return ret == -1 ? 
-errno : ret;
+}
+
+reg_t syscall_t::sys_read(reg_t fd, reg_t pbuf, reg_t len, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> buf(len);
+  ssize_t ret = read(fds.lookup(fd), buf.data(), len);
+  reg_t ret_errno = sysret_errno(ret);
+  if (ret > 0)
+    memif->write(pbuf, ret, buf.data());
+  return ret_errno;
+}
+
+reg_t syscall_t::sys_pread(reg_t fd, reg_t pbuf, reg_t len, reg_t off, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> buf(len);
+  ssize_t ret = pread(fds.lookup(fd), buf.data(), len, off);
+  reg_t ret_errno = sysret_errno(ret);
+  if (ret > 0)
+    memif->write(pbuf, ret, buf.data());
+  return ret_errno;
+}
+
+reg_t syscall_t::sys_write(reg_t fd, reg_t pbuf, reg_t len, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> buf(len);
+  memif->read(pbuf, len, buf.data());
+  reg_t ret = sysret_errno(write(fds.lookup(fd), buf.data(), len));
+  return ret;
+}
+
+reg_t syscall_t::sys_pwrite(reg_t fd, reg_t pbuf, reg_t len, reg_t off, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> buf(len);
+  memif->read(pbuf, len, buf.data());
+  reg_t ret = sysret_errno(pwrite(fds.lookup(fd), buf.data(), len, off));
+  return ret;
+}
+
+reg_t syscall_t::sys_close(reg_t fd, reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  if (close(fds.lookup(fd)) < 0)
+    return sysret_errno(-1);
+  fds.dealloc(fd);
+  return 0;
+}
+
+reg_t syscall_t::sys_lseek(reg_t fd, reg_t ptr, reg_t dir, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  return sysret_errno(lseek(fds.lookup(fd), ptr, dir));
+}
+
+reg_t syscall_t::sys_fstat(reg_t fd, reg_t pbuf, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  struct stat buf;
+  reg_t ret = sysret_errno(fstat(fds.lookup(fd), &buf));
+  if (ret != (reg_t)-1)
+  {
+    riscv_stat rbuf(buf, htif);
+    memif->write(pbuf, sizeof(rbuf), &rbuf);
+  }
+  return ret;
+}
+
+reg_t syscall_t::sys_fcntl(reg_t fd, reg_t cmd, reg_t arg, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  return sysret_errno(fcntl(fds.lookup(fd), cmd, arg));
+}
+
+reg_t syscall_t::sys_ftruncate(reg_t fd, reg_t len, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  return sysret_errno(ftruncate(fds.lookup(fd), len));
+}
+
+reg_t syscall_t::sys_lstat(reg_t pname, reg_t len, reg_t pbuf, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+
+  struct stat buf;
+  reg_t ret = sysret_errno(lstat(do_chroot(name.data()).c_str(), &buf));
+  if (ret != (reg_t)-1)
+  {
+    riscv_stat rbuf(buf, htif);
+    memif->write(pbuf, sizeof(rbuf), &rbuf);
+  }
+  return ret;
+}
+
+reg_t syscall_t::sys_statx(reg_t fd, reg_t pname, reg_t len, reg_t flags, reg_t mask, reg_t pbuf, reg_t a6)
+{
+#ifndef HAVE_STATX
+  return -ENOSYS;
+#else
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+
+  struct statx buf;
+  reg_t ret = sysret_errno(statx(fds.lookup(fd), do_chroot(name.data()).c_str(), flags, mask, &buf));
+  if (ret != (reg_t)-1)
+  {
+    riscv_statx rbuf(buf, htif);
+    memif->write(pbuf, sizeof(rbuf), &rbuf);
+  }
+  return ret;
+#endif
+}
+
+#define AT_SYSCALL(syscall, fd, name, ...) \
+  (syscall(fds.lookup(fd), int(fd) == RISCV_AT_FDCWD ? do_chroot(name).c_str() : (name), __VA_ARGS__))
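Every handler above follows the same copy-in/call/copy-out shape: pull the guest buffer out of target memory via memif, run the host syscall, and return either the result or -errno. The standalone editorial sketch below demonstrates that shape against a toy flat guest memory; mem_read/mem_write and guest_mem are stand-ins invented here, not fesvr API.

```cpp
#include <unistd.h>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <vector>

static std::vector<uint8_t> guest_mem(1 << 20);  // toy guest memory

static void mem_read(uint64_t addr, size_t len, void* dst)   // cf. memif_t::read
{
  memcpy(dst, &guest_mem[addr], len);
}

static void mem_write(uint64_t addr, size_t len, const void* src)  // cf. memif_t::write
{
  memcpy(&guest_mem[addr], src, len);
}

// Proxied write(2): copy the buffer out of guest memory, run the host
// syscall, and return -errno on failure like sysret_errno() does.
static int64_t proxy_write(int fd, uint64_t pbuf, size_t len)
{
  std::vector<char> buf(len);
  mem_read(pbuf, len, buf.data());
  ssize_t ret = ::write(fd, buf.data(), len);
  return ret == -1 ? -errno : ret;
}

int main()
{
  const char msg[] = "hello from the guest\n";
  mem_write(0x1000, sizeof(msg) - 1, msg);
  return proxy_write(1, 0x1000, sizeof(msg) - 1) > 0 ? 0 : 1;
}
```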
+
+reg_t syscall_t::sys_openat(reg_t dirfd, reg_t pname, reg_t len, reg_t flags, reg_t mode, reg_t a5, reg_t a6)
+{
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+  int fd = sysret_errno(AT_SYSCALL(openat, dirfd, name.data(), flags, mode));
+  if (fd < 0)
+    return sysret_errno(-1);
+  return fds.alloc(fd);
+}
+
+reg_t syscall_t::sys_fstatat(reg_t dirfd, reg_t pname, reg_t len, reg_t pbuf, reg_t flags, reg_t a5, reg_t a6)
+{
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+
+  struct stat buf;
+  reg_t ret = sysret_errno(AT_SYSCALL(fstatat, dirfd, name.data(), &buf, flags));
+  if (ret != (reg_t)-1)
+  {
+    riscv_stat rbuf(buf, htif);
+    memif->write(pbuf, sizeof(rbuf), &rbuf);
+  }
+  return ret;
+}
+
+reg_t syscall_t::sys_faccessat(reg_t dirfd, reg_t pname, reg_t len, reg_t mode, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+  return sysret_errno(AT_SYSCALL(faccessat, dirfd, name.data(), mode, 0));
+}
+
+reg_t syscall_t::sys_renameat(reg_t odirfd, reg_t popath, reg_t olen, reg_t ndirfd, reg_t pnpath, reg_t nlen, reg_t a6)
+{
+  std::vector<char> opath(olen), npath(nlen);
+  memif->read(popath, olen, opath.data());
+  memif->read(pnpath, nlen, npath.data());
+  return sysret_errno(renameat(fds.lookup(odirfd), int(odirfd) == RISCV_AT_FDCWD ? do_chroot(opath.data()).c_str() : opath.data(),
+                               fds.lookup(ndirfd), int(ndirfd) == RISCV_AT_FDCWD ? do_chroot(npath.data()).c_str() : npath.data()));
+}
+
+reg_t syscall_t::sys_linkat(reg_t odirfd, reg_t poname, reg_t olen, reg_t ndirfd, reg_t pnname, reg_t nlen, reg_t flags)
+{
+  std::vector<char> oname(olen), nname(nlen);
+  memif->read(poname, olen, oname.data());
+  memif->read(pnname, nlen, nname.data());
+  return sysret_errno(linkat(fds.lookup(odirfd), int(odirfd) == RISCV_AT_FDCWD ? do_chroot(oname.data()).c_str() : oname.data(),
+                             fds.lookup(ndirfd), int(ndirfd) == RISCV_AT_FDCWD ? do_chroot(nname.data()).c_str() : nname.data(),
+                             flags));
+}
+
+reg_t syscall_t::sys_unlinkat(reg_t dirfd, reg_t pname, reg_t len, reg_t flags, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+  return sysret_errno(AT_SYSCALL(unlinkat, dirfd, name.data(), flags));
+}
+
+reg_t syscall_t::sys_mkdirat(reg_t dirfd, reg_t pname, reg_t len, reg_t mode, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> name(len);
+  memif->read(pname, len, name.data());
+  return sysret_errno(AT_SYSCALL(mkdirat, dirfd, name.data(), mode));
+}
+
+reg_t syscall_t::sys_getcwd(reg_t pbuf, reg_t size, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<char> buf(size);
+  char* ret = getcwd(buf.data(), size);
+  if (ret == NULL)
+    return sysret_errno(-1);
+  std::string tmp = undo_chroot(buf.data());
+  if (size <= tmp.size())
+    return -ENOMEM;
+  memif->write(pbuf, tmp.size() + 1, tmp.data());
+  return tmp.size() + 1;
+}
+
+reg_t syscall_t::sys_getmainvars(reg_t pbuf, reg_t limit, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  std::vector<std::string> args = htif->target_args();
+  std::vector<target_endian<uint64_t>> words(args.size() + 3);
+  words[0] = htif->to_target<uint64_t>(args.size());
+  words[args.size()+1] = target_endian<uint64_t>::zero; // argv[argc] = NULL
+  words[args.size()+2] = target_endian<uint64_t>::zero; // envp[0] = NULL
+
+  size_t sz = (args.size() + 3) * sizeof(words[0]);
+  for (size_t i = 0; i < args.size(); i++)
+  {
+    words[i+1] = htif->to_target<uint64_t>(sz + pbuf);
+    sz += args[i].length() + 1;
+  }
+
+  std::vector<char> bytes(sz);
+  memcpy(bytes.data(), words.data(), sizeof(words[0]) * words.size());
+  for (size_t i = 0; i < args.size(); i++)
+    strcpy(&bytes[htif->from_target(words[i+1]) - pbuf], args[i].c_str());
+
+  if (bytes.size() > limit)
+    return -ENOMEM;
+
+  memif->write(pbuf, bytes.size(), bytes.data());
+  return 0;
+}
+
+reg_t syscall_t::sys_chdir(reg_t path, reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6)
+{
+  size_t size = 0;
+  while (memif->read_uint8(path + size++))
+    ;
+  std::vector<char> buf(size);
+  for (size_t offset = 0;; offset++)
+  {
+    buf[offset] = memif->read_uint8(path + offset);
+    if (!buf[offset])
+      break;
+  }
+  return sysret_errno(chdir(buf.data()));
+}
+
+void syscall_t::dispatch(reg_t mm)
+{
+  target_endian<reg_t> magicmem[8];
+  memif->read(mm, sizeof(magicmem), magicmem);
+
+  reg_t n = htif->from_target(magicmem[0]);
+  if (n >= table.size() || !table[n])
+    throw std::runtime_error("bad syscall #" + std::to_string(n));
+
+  magicmem[0] = htif->to_target((this->*table[n])(htif->from_target(magicmem[1]), htif->from_target(magicmem[2]), htif->from_target(magicmem[3]), htif->from_target(magicmem[4]), htif->from_target(magicmem[5]), htif->from_target(magicmem[6]), htif->from_target(magicmem[7])));
+
+  memif->write(mm, sizeof(magicmem), magicmem);
+}
+
+reg_t fds_t::alloc(int fd)
+{
+  reg_t i;
+  for (i = 0; i < fds.size(); i++)
+    if (fds[i] == -1)
+      break;
+
+  if (i == fds.size())
+    fds.resize(i+1);
+
+  fds[i] = fd;
+  return i;
+}
+
+void fds_t::dealloc(reg_t fd)
+{
+  fds[fd] = -1;
+}
+
+int fds_t::lookup(reg_t fd)
+{
+  if (int(fd) == RISCV_AT_FDCWD)
+    return AT_FDCWD;
+  return fd >= fds.size() ? -1 : fds[fd];
+}
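dispatch() implements a simple "magic memory" calling convention: the target lays out eight machine words (the syscall number followed by up to seven arguments) and passes the buffer's address through tohost; the host overwrites word 0 with the return value. The standalone editorial sketch below walks through that round trip with endianness conversion and the full table elided; toy_sys_exit is a placeholder, not fesvr code.

```cpp
#include <cstdint>
#include <cstdio>

typedef uint64_t reg_t;

static reg_t toy_sys_exit(reg_t code) { return code; }  // placeholder handler

static void dispatch_sketch(reg_t magicmem[8])
{
  reg_t n = magicmem[0];       // word 0 in: syscall number
  reg_t ret;
  switch (n) {
    case 93: ret = toy_sys_exit(magicmem[1]); break;  // Linux exit = 93
    default: ret = reg_t(-38); break;                 // -ENOSYS
  }
  magicmem[0] = ret;           // word 0 out: return value
}

int main()
{
  reg_t magicmem[8] = {93, 42, 0, 0, 0, 0, 0, 0};
  dispatch_sketch(magicmem);
  printf("sys_exit returned %llu\n", (unsigned long long)magicmem[0]);
  return 0;
}
```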
+
+void syscall_t::set_chroot(const char* where)
+{
+  char buf1[PATH_MAX], buf2[PATH_MAX];
+
+  if (getcwd(buf1, sizeof(buf1)) == NULL
+      || chdir(where) != 0
+      || getcwd(buf2, sizeof(buf2)) == NULL
+      || chdir(buf1) != 0)
+  {
+    fprintf(stderr, "could not chroot to %s\n", where);
+    exit(-1);
+  }
+
+  chroot = buf2;
+}
diff --git a/vendor/riscv-isa-sim/fesvr/syscall.h b/vendor/riscv-isa-sim/fesvr/syscall.h
new file mode 100644
index 00000000..4915efd6
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/syscall.h
@@ -0,0 +1,73 @@
+// See LICENSE for license details.
+
+#ifndef __SYSCALL_H
+#define __SYSCALL_H
+
+#include "device.h"
+#include "memif.h"
+#include <vector>
+#include <string>
+
+class syscall_t;
+typedef reg_t (syscall_t::*syscall_func_t)(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+
+class htif_t;
+class memif_t;
+
+class fds_t
+{
+ public:
+  reg_t alloc(int fd);
+  void dealloc(reg_t fd);
+  int lookup(reg_t fd);
+ private:
+  std::vector<int> fds;
+};
+
+class syscall_t : public device_t
+{
+ public:
+  syscall_t(htif_t*);
+
+  void set_chroot(const char* where);
+
+ private:
+  const char* identity() { return "syscall_proxy"; }
+
+  htif_t* htif;
+  memif_t* memif;
+  std::vector<syscall_func_t> table;
+  fds_t fds;
+
+  void handle_syscall(command_t cmd);
+  void dispatch(addr_t mm);
+
+  std::string chroot;
+  std::string do_chroot(const char* fn);
+  std::string undo_chroot(const char* fn);
+
+  reg_t sys_exit(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_openat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_read(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_pread(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_write(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_pwrite(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_close(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_lseek(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_fstat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_lstat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_statx(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_fstatat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_faccessat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_fcntl(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_ftruncate(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_renameat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_linkat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_unlinkat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_mkdirat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_getcwd(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_getmainvars(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+  reg_t sys_chdir(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t);
+};
+
+#endif
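do_chroot() and undo_chroot() give the target an illusion of its own filesystem root: absolute guest paths get the host prefix prepended on the way out, and host paths lose it again on the way back (getcwd). The standalone editorial sketch below demonstrates the mapping; "/tmp/root" is an arbitrary example prefix and the helpers mirror, rather than reuse, the vendored functions.

```cpp
#include <cassert>
#include <cstring>
#include <string>

static const std::string root = "/tmp/root";   // assumed chroot prefix

static std::string to_host(const char* fn)     // cf. do_chroot
{
  return (!root.empty() && *fn == '/') ? root + fn : fn;
}

static std::string to_guest(const char* fn)    // cf. undo_chroot
{
  if (strncmp(fn, root.c_str(), root.size()) == 0)
    return fn + root.size();
  return "/";  // outside the sandbox: clamp to the guest root
}

int main()
{
  assert(to_host("/etc/passwd") == "/tmp/root/etc/passwd");
  assert(to_guest("/tmp/root/etc/passwd") == "/etc/passwd");
  assert(to_guest("/home/elsewhere") == "/");
  return 0;
}
```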
diff --git a/vendor/riscv-isa-sim/fesvr/term.cc b/vendor/riscv-isa-sim/fesvr/term.cc
new file mode 100644
index 00000000..c4cba0c0
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/term.cc
@@ -0,0 +1,53 @@
+#include "term.h"
+#include <termios.h>
+#include <unistd.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+class canonical_termios_t
+{
+ public:
+  canonical_termios_t()
+   : restore_tios(false)
+  {
+    if (tcgetattr(0, &old_tios) == 0)
+    {
+      struct termios new_tios = old_tios;
+      new_tios.c_lflag &= ~(ICANON | ECHO);
+      if (tcsetattr(0, TCSANOW, &new_tios) == 0)
+        restore_tios = true;
+    }
+  }
+
+  ~canonical_termios_t()
+  {
+    if (restore_tios)
+      tcsetattr(0, TCSANOW, &old_tios);
+  }
+ private:
+  struct termios old_tios;
+  bool restore_tios;
+};
+
+static canonical_termios_t tios; // exit() will clean up for us
+
+int canonical_terminal_t::read()
+{
+  struct pollfd pfd;
+  pfd.fd = 0;
+  pfd.events = POLLIN;
+  int ret = poll(&pfd, 1, 0);
+  if (ret <= 0 || !(pfd.revents & POLLIN))
+    return -1;
+
+  unsigned char ch;
+  ret = ::read(0, &ch, 1);
+  return ret <= 0 ? -1 : ch;
+}
+
+void canonical_terminal_t::write(char ch)
+{
+  if (::write(1, &ch, 1) != 1)
+    abort();
+}
diff --git a/vendor/riscv-isa-sim/fesvr/term.h b/vendor/riscv-isa-sim/fesvr/term.h
new file mode 100644
index 00000000..7a2c22fc
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/term.h
@@ -0,0 +1,11 @@
+#ifndef _TERM_H
+#define _TERM_H
+
+class canonical_terminal_t
+{
+ public:
+  static int read();
+  static void write(char);
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/fesvr/tsi.cc b/vendor/riscv-isa-sim/fesvr/tsi.cc
new file mode 100644
index 00000000..5ccafc4b
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/tsi.cc
@@ -0,0 +1,115 @@
+#include "tsi.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+#define NHARTS_MAX 16
+
+void tsi_t::host_thread(void *arg)
+{
+  tsi_t *tsi = static_cast<tsi_t*>(arg);
+  tsi->run();
+
+  while (true)
+    tsi->target->switch_to();
+}
+
+tsi_t::tsi_t(int argc, char** argv) : htif_t(argc, argv)
+{
+  target = context_t::current();
+  host.init(host_thread, this);
+}
+
+tsi_t::~tsi_t(void)
+{
+}
+
+#define MSIP_BASE 0x2000000
+
+// Interrupt core 0 to make it start executing the program in DRAM
+void tsi_t::reset()
+{
+  uint32_t one = 1;
+
+  write_chunk(MSIP_BASE, sizeof(uint32_t), &one);
+}
+
+void tsi_t::push_addr(addr_t addr)
+{
+  for (int i = 0; i < SAI_ADDR_CHUNKS; i++) {
+    in_data.push_back(addr & 0xffffffff);
+    addr = addr >> 32;
+  }
+}
+
+void tsi_t::push_len(addr_t len)
+{
+  for (int i = 0; i < SAI_LEN_CHUNKS; i++) {
+    in_data.push_back(len & 0xffffffff);
+    len = len >> 32;
+  }
+}
+
+void tsi_t::read_chunk(addr_t taddr, size_t nbytes, void* dst)
+{
+  uint32_t *result = static_cast<uint32_t*>(dst);
+  size_t len = nbytes / sizeof(uint32_t);
+
+  in_data.push_back(SAI_CMD_READ);
+  push_addr(taddr);
+  push_len(len - 1);
+
+  for (size_t i = 0; i < len; i++) {
+    while (out_data.empty())
+      switch_to_target();
+    result[i] = out_data.front();
+    out_data.pop_front();
+  }
+}
+
+void tsi_t::write_chunk(addr_t taddr, size_t nbytes, const void* src)
+{
+  const uint32_t *src_data = static_cast<const uint32_t*>(src);
+  size_t len = nbytes / sizeof(uint32_t);
+
+  in_data.push_back(SAI_CMD_WRITE);
+  push_addr(taddr);
+  push_len(len - 1);
+
+  in_data.insert(in_data.end(), src_data, src_data + len);
+}
+
+void tsi_t::send_word(uint32_t word)
+{
+  out_data.push_back(word);
+}
+
+uint32_t tsi_t::recv_word(void)
+{
+  uint32_t word = in_data.front();
+  in_data.pop_front();
+  return word;
+}
+
+bool tsi_t::data_available(void)
+{
+  return !in_data.empty();
+}
+
+void tsi_t::switch_to_host(void)
+{
+  host.switch_to();
+}
+
+void tsi_t::switch_to_target(void)
+{
+  target->switch_to();
+}
+
+void tsi_t::tick(bool out_valid, uint32_t out_bits, bool in_ready)
+{
+  if (out_valid && out_ready())
+    out_data.push_back(out_bits);
+
+  if (in_valid() && in_ready)
+    in_data.pop_front();
+}
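read_chunk() and write_chunk() above frame every transaction the same way: one command word, a 64-bit address split into SAI_ADDR_CHUNKS 32-bit words (low word first), a payload length minus one split the same way, then the payload. The standalone editorial sketch below builds one such write frame into a deque; the constants mirror the #defines in tsi.h but the code is illustrative, not the vendored implementation.

```cpp
#include <cstdint>
#include <cstdio>
#include <deque>

static const uint32_t CMD_WRITE = 1;            // cf. SAI_CMD_WRITE
static const int ADDR_CHUNKS = 2, LEN_CHUNKS = 2;

static void push64(std::deque<uint32_t>& q, uint64_t v, int chunks)
{
  for (int i = 0; i < chunks; i++) {
    q.push_back(v & 0xffffffff);                // low word first
    v >>= 32;
  }
}

int main()
{
  std::deque<uint32_t> q;
  uint32_t payload[2] = {0xdeadbeef, 0x12345678};

  q.push_back(CMD_WRITE);
  push64(q, 0x80000000ULL, ADDR_CHUNKS);        // target address
  push64(q, 2 - 1, LEN_CHUNKS);                 // payload words minus one
  for (uint32_t w : payload)
    q.push_back(w);

  printf("frame is %zu words\n", q.size());     // 1 + 2 + 2 + 2 = 7
  return 0;
}
```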
diff --git a/vendor/riscv-isa-sim/fesvr/tsi.h b/vendor/riscv-isa-sim/fesvr/tsi.h
new file mode 100644
index 00000000..825a3a00
--- /dev/null
+++ b/vendor/riscv-isa-sim/fesvr/tsi.h
@@ -0,0 +1,57 @@
+#ifndef __SAI_H
+#define __SAI_H
+
+#include "htif.h"
+#include "context.h"
+
+#include <stdint.h>
+#include <string.h>
+#include <deque>
+#include <vector>
+
+#define SAI_CMD_READ 0
+#define SAI_CMD_WRITE 1
+
+#define SAI_ADDR_CHUNKS 2
+#define SAI_LEN_CHUNKS 2
+
+class tsi_t : public htif_t
+{
+ public:
+  tsi_t(int argc, char** argv);
+  virtual ~tsi_t();
+
+  bool data_available();
+  void send_word(uint32_t word);
+  uint32_t recv_word();
+  void switch_to_host();
+
+  uint32_t in_bits() { return in_data.front(); }
+  bool in_valid() { return !in_data.empty(); }
+  bool out_ready() { return true; }
+  void tick(bool out_valid, uint32_t out_bits, bool in_ready);
+
+ protected:
+  void reset() override;
+  void read_chunk(addr_t taddr, size_t nbytes, void* dst) override;
+  void write_chunk(addr_t taddr, size_t nbytes, const void* src) override;
+  void switch_to_target();
+
+  size_t chunk_align() override { return 4; }
+  size_t chunk_max_size() override { return 1024; }
+
+  int get_ipi_addrs(addr_t *addrs);
+
+ private:
+  context_t host;
+  context_t* target;
+  std::deque<uint32_t> in_data;
+  std::deque<uint32_t> out_data;
+
+  void push_addr(addr_t addr);
+  void push_len(addr_t len);
+
+  static void host_thread(void *tsi);
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv-disasm.pc.in b/vendor/riscv-isa-sim/riscv-disasm.pc.in
new file mode 100644
index 00000000..8e022e93
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv-disasm.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@prefix@
+libdir=${prefix}/@libdir@
+includedir=${prefix}/@includedir@
+
+Name: riscv-disasm
+Description: RISC-V disassembler
+Version: git
+Libs: -Wl,-rpath,${libdir} -L${libdir} -ldisasm
+Cflags: -I${includedir}
+URL: http://riscv.org/download.html#tab_disasm
diff --git a/vendor/riscv-isa-sim/riscv-fesvr.pc.in b/vendor/riscv-isa-sim/riscv-fesvr.pc.in
new file mode 100644
index 00000000..efd7eed1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv-fesvr.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@prefix@
+libdir=${prefix}/@libdir@
+includedir=${prefix}/@includedir@
+
+Name: riscv-fesvr
+Description: RISC-V front-end server
+Version: git
+Libs: -Wl,-rpath,${libdir} -L${libdir} -lfesvr
+Cflags: -I${includedir}
+URL: http://riscv.org/download.html#tab_fesvr
diff --git a/vendor/riscv-isa-sim/riscv/abstract_device.h b/vendor/riscv-isa-sim/riscv/abstract_device.h
new file mode 100644
index 00000000..559c64f6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/abstract_device.h
@@ -0,0 +1,15 @@
+#ifndef _RISCV_ABSTRACT_DEVICE_H
+#define _RISCV_ABSTRACT_DEVICE_H
+
+#include "decode.h"
+#include <cstdint>
+#include <cstddef>
+
+class abstract_device_t {
+ public:
+  virtual bool load(reg_t addr, size_t len, uint8_t* bytes) = 0;
+  virtual bool store(reg_t addr, size_t len, const uint8_t* bytes) = 0;
+  virtual ~abstract_device_t() {}
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/arith.h b/vendor/riscv-isa-sim/riscv/arith.h
new file mode 100644
index 00000000..9e0c2f74
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/arith.h
@@ -0,0 +1,216 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_ARITH_H
+#define _RISCV_ARITH_H
+
+#include <cassert>
+#include <cstdint>
+#include <climits>
+#include <cstddef>
+
+inline uint64_t mulhu(uint64_t a, uint64_t b)
+{
+  uint64_t t;
+  uint32_t y1, y2, y3;
+  uint64_t a0 = (uint32_t)a, a1 = a >> 32;
+  uint64_t b0 = (uint32_t)b, b1 = b >> 32;
+
+  t = a1*b0 + ((a0*b0) >> 32);
+  y1 = t;
+  y2 = t >> 32;
+
+  t = a0*b1 + y1;
+  y1 = t;
+
+  t = a1*b1 + y2 + (t >> 32);
+  y2 = t;
+  y3 = t >> 32;
+
+  return ((uint64_t)y3 << 32) | y2;
+}
+
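mulhu() above computes the high 64 bits of a 64x64 multiply from four 32-bit partial products, which is handy to sanity-check. The standalone editorial sketch below cross-checks the same scheme against the compiler's 128-bit arithmetic; unsigned __int128 is a GCC/Clang extension assumed available here.

```cpp
#include <cassert>
#include <cstdint>

static uint64_t mulhu_ref(uint64_t a, uint64_t b)
{
  // Reference: take the top half of a full 128-bit product.
  return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

static uint64_t mulhu(uint64_t a, uint64_t b)  // same scheme as above
{
  uint64_t a0 = (uint32_t)a, a1 = a >> 32;
  uint64_t b0 = (uint32_t)b, b1 = b >> 32;
  uint64_t t = a1*b0 + ((a0*b0) >> 32);
  uint32_t y1 = t, y2 = t >> 32, y3;
  t = a0*b1 + y1;            // middle partial product, low half
  t = a1*b1 + y2 + (t >> 32); // high partial product plus carries
  y2 = t;
  y3 = t >> 32;
  return ((uint64_t)y3 << 32) | y2;
}

int main()
{
  uint64_t cases[] = {0, 1, 0xffffffffULL, 0x100000000ULL,
                      ~0ULL, 0x123456789abcdef0ULL};
  for (uint64_t a : cases)
    for (uint64_t b : cases)
      assert(mulhu(a, b) == mulhu_ref(a, b));
  return 0;
}
```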
+inline int64_t mulh(int64_t a, int64_t b)
+{
+  int negate = (a < 0) != (b < 0);
+  uint64_t res = mulhu(a < 0 ? -a : a, b < 0 ? -b : b);
+  return negate ? ~res + (a * b == 0) : res;
+}
+
+inline int64_t mulhsu(int64_t a, uint64_t b)
+{
+  int negate = a < 0;
+  uint64_t res = mulhu(a < 0 ? -a : a, b);
+  return negate ? ~res + (a * b == 0) : res;
+}
+
+//ref: https://locklessinc.com/articles/sat_arithmetic/
+template<typename T, typename UT>
+static inline T sat_add(T x, T y, bool &sat)
+{
+  UT ux = x;
+  UT uy = y;
+  UT res = ux + uy;
+  sat = false;
+  int sh = sizeof(T) * 8 - 1;
+
+  /* Calculate overflowed result. (Don't change the sign bit of ux) */
+  ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+  /* Force compiler to use cmovns instruction */
+  if ((T) ((ux ^ uy) | ~(uy ^ res)) >= 0) {
+    res = ux;
+    sat = true;
+  }
+
+  return res;
+}
+
+template<typename T, typename UT>
+static inline T sat_add(T x, T y, T z, bool &sat)
+{
+  bool sat1, sat2;
+  T a = y;
+  T b = z;
+  T res;
+
+  /* Force compiler to use cmovs instruction */
+  if (((y ^ z) & (x ^ z)) < 0) {
+    a = z;
+    b = y;
+  }
+
+  res = sat_add<T, UT>(x, a, sat1);
+  res = sat_add<T, UT>(res, b, sat2);
+  sat = sat1 || sat2;
+
+  return res;
+}
+
+template<typename T, typename UT>
+static inline T sat_sub(T x, T y, bool &sat)
+{
+  UT ux = x;
+  UT uy = y;
+  UT res = ux - uy;
+  sat = false;
+  int sh = sizeof(T) * 8 - 1;
+
+  /* Calculate overflowed result. (Don't change the sign bit of ux) */
+  ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+  /* Force compiler to use cmovns instruction */
+  if ((T) ((ux ^ uy) & (ux ^ res)) < 0) {
+    res = ux;
+    sat = true;
+  }
+
+  return res;
+}
+
+template<typename T>
+T sat_addu(T x, T y, bool &sat)
+{
+  T res = x + y;
+  sat = false;
+
+  sat = res < x;
+  res |= -(res < x);
+
+  return res;
+}
+
+template<typename T>
+T sat_subu(T x, T y, bool &sat)
+{
+  T res = x - y;
+  sat = false;
+
+  sat = !(res <= x);
+  res &= -(res <= x);
+
+  return res;
+}
+
+static inline uint64_t extract64(uint64_t val, int pos, int len)
+{
+  assert(pos >= 0 && len > 0 && len <= 64 - pos);
+  return (val >> pos) & (~UINT64_C(0) >> (64 - len));
+}
+
+static inline uint64_t make_mask64(int pos, int len)
+{
+  assert(pos >= 0 && len > 0 && pos < 64 && len <= 64);
+  return (UINT64_MAX >> (64 - len)) << pos;
+}
+
+static inline int popcount(uint64_t val)
+{
+  val = (val & 0x5555555555555555U) + ((val >> 1) & 0x5555555555555555U);
+  val = (val & 0x3333333333333333U) + ((val >> 2) & 0x3333333333333333U);
+  val = (val & 0x0f0f0f0f0f0f0f0fU) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fU);
+  val = (val & 0x00ff00ff00ff00ffU) + ((val >> 8) & 0x00ff00ff00ff00ffU);
+  val = (val & 0x0000ffff0000ffffU) + ((val >> 16) & 0x0000ffff0000ffffU);
+  val = (val & 0x00000000ffffffffU) + ((val >> 32) & 0x00000000ffffffffU);
+  return val;
+}
+
+static inline int ctz(uint64_t val)
+{
+  if (!val)
+    return 0;
+
+  int res = 0;
+
+  if ((val << 32) == 0) res += 32, val >>= 32;
+  if ((val << 48) == 0) res += 16, val >>= 16;
+  if ((val << 56) == 0) res += 8, val >>= 8;
+  if ((val << 60) == 0) res += 4, val >>= 4;
+  if ((val << 62) == 0) res += 2, val >>= 2;
+  if ((val << 63) == 0) res += 1, val >>= 1;
+
+  return res;
+}
+
+static inline int clz(uint64_t val)
+{
+  if (!val)
+    return 0;
+
+  int res = 0;
+
+  if ((val >> 32) == 0) res += 32, val <<= 32;
+  if ((val >> 48) == 0) res += 16, val <<= 16;
+  if ((val >> 56) == 0) res += 8, val <<= 8;
+  if ((val >> 60) == 0) res += 4, val <<= 4;
+  if ((val >> 62) == 0) res += 2, val <<= 2;
+  if ((val >> 63) == 0) res += 1, val <<= 1;
+
+  return res;
+}
+
+static inline int log2(uint64_t val)
+{
+  if (!val)
+    return 0;
+
+  return 63 - clz(val);
+}
+
+static inline uint64_t xperm(uint64_t rs1, uint64_t rs2, size_t sz_log2, size_t len)
+{
+  uint64_t r = 0;
+  uint64_t sz = 1LL << sz_log2;
+  uint64_t mask = (1LL << sz) - 
1; + + assert(sz_log2 <= 6 && len <= 64); + + for (size_t i = 0; i < len; i += sz) { + uint64_t pos = ((rs2 >> i) & mask) << sz_log2; + if (pos < len) + r |= ((rs1 >> pos) & mask) << i; + } + + return r; +} + +#endif diff --git a/vendor/riscv-isa-sim/riscv/cachesim.cc b/vendor/riscv-isa-sim/riscv/cachesim.cc new file mode 100644 index 00000000..48840cb4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/cachesim.cc @@ -0,0 +1,210 @@ +// See LICENSE for license details. + +#include "cachesim.h" +#include "common.h" +#include +#include +#include + +cache_sim_t::cache_sim_t(size_t _sets, size_t _ways, size_t _linesz, const char* _name) +: sets(_sets), ways(_ways), linesz(_linesz), name(_name), log(false) +{ + init(); +} + +static void help() +{ + std::cerr << "Cache configurations must be of the form" << std::endl; + std::cerr << " sets:ways:blocksize" << std::endl; + std::cerr << "where sets, ways, and blocksize are positive integers, with" << std::endl; + std::cerr << "sets and blocksize both powers of two and blocksize at least 8." << std::endl; + exit(1); +} + +cache_sim_t* cache_sim_t::construct(const char* config, const char* name) +{ + const char* wp = strchr(config, ':'); + if (!wp++) help(); + const char* bp = strchr(wp, ':'); + if (!bp++) help(); + + size_t sets = atoi(std::string(config, wp).c_str()); + size_t ways = atoi(std::string(wp, bp).c_str()); + size_t linesz = atoi(bp); + + if (ways > 4 /* empirical */ && sets == 1) + return new fa_cache_sim_t(ways, linesz, name); + return new cache_sim_t(sets, ways, linesz, name); +} + +void cache_sim_t::init() +{ + if(sets == 0 || (sets & (sets-1))) + help(); + if(linesz < 8 || (linesz & (linesz-1))) + help(); + + idx_shift = 0; + for (size_t x = linesz; x>1; x >>= 1) + idx_shift++; + + tags = new uint64_t[sets*ways](); + read_accesses = 0; + read_misses = 0; + bytes_read = 0; + write_accesses = 0; + write_misses = 0; + bytes_written = 0; + writebacks = 0; + + miss_handler = NULL; +} + +cache_sim_t::cache_sim_t(const cache_sim_t& rhs) + : sets(rhs.sets), ways(rhs.ways), linesz(rhs.linesz), + idx_shift(rhs.idx_shift), name(rhs.name), log(false) +{ + tags = new uint64_t[sets*ways]; + memcpy(tags, rhs.tags, sets*ways*sizeof(uint64_t)); +} + +cache_sim_t::~cache_sim_t() +{ + print_stats(); + delete [] tags; +} + +void cache_sim_t::print_stats() +{ + if(read_accesses + write_accesses == 0) + return; + + float mr = 100.0f*(read_misses+write_misses)/(read_accesses+write_accesses); + + std::cout << std::setprecision(3) << std::fixed; + std::cout << name << " "; + std::cout << "Bytes Read: " << bytes_read << std::endl; + std::cout << name << " "; + std::cout << "Bytes Written: " << bytes_written << std::endl; + std::cout << name << " "; + std::cout << "Read Accesses: " << read_accesses << std::endl; + std::cout << name << " "; + std::cout << "Write Accesses: " << write_accesses << std::endl; + std::cout << name << " "; + std::cout << "Read Misses: " << read_misses << std::endl; + std::cout << name << " "; + std::cout << "Write Misses: " << write_misses << std::endl; + std::cout << name << " "; + std::cout << "Writebacks: " << writebacks << std::endl; + std::cout << name << " "; + std::cout << "Miss Rate: " << mr << '%' << std::endl; +} + +uint64_t* cache_sim_t::check_tag(uint64_t addr) +{ + size_t idx = (addr >> idx_shift) & (sets-1); + size_t tag = (addr >> idx_shift) | VALID; + + for (size_t i = 0; i < ways; i++) + if (tag == (tags[idx*ways + i] & ~DIRTY)) + return &tags[idx*ways + i]; + + return NULL; +} + +uint64_t cache_sim_t::victimize(uint64_t 
addr) +{ + size_t idx = (addr >> idx_shift) & (sets-1); + size_t way = lfsr.next() % ways; + uint64_t victim = tags[idx*ways + way]; + tags[idx*ways + way] = (addr >> idx_shift) | VALID; + return victim; +} + +void cache_sim_t::access(uint64_t addr, size_t bytes, bool store) +{ + store ? write_accesses++ : read_accesses++; + (store ? bytes_written : bytes_read) += bytes; + + uint64_t* hit_way = check_tag(addr); + if (likely(hit_way != NULL)) + { + if (store) + *hit_way |= DIRTY; + return; + } + + store ? write_misses++ : read_misses++; + if (log) + { + std::cerr << name << " " + << (store ? "write" : "read") << " miss 0x" + << std::hex << addr << std::endl; + } + + uint64_t victim = victimize(addr); + + if ((victim & (VALID | DIRTY)) == (VALID | DIRTY)) + { + uint64_t dirty_addr = (victim & ~(VALID | DIRTY)) << idx_shift; + if (miss_handler) + miss_handler->access(dirty_addr, linesz, true); + writebacks++; + } + + if (miss_handler) + miss_handler->access(addr & ~(linesz-1), linesz, false); + + if (store) + *check_tag(addr) |= DIRTY; +} + +void cache_sim_t::clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval) +{ + uint64_t start_addr = addr & ~(linesz-1); + uint64_t end_addr = (addr + bytes + linesz-1) & ~(linesz-1); + uint64_t cur_addr = start_addr; + while (cur_addr < end_addr) { + uint64_t* hit_way = check_tag(cur_addr); + if (likely(hit_way != NULL)) + { + if (clean) { + if (*hit_way & DIRTY) { + writebacks++; + *hit_way &= ~DIRTY; + } + } + + if (inval) + *hit_way &= ~VALID; + } + cur_addr += linesz; + } + if (miss_handler) + miss_handler->clean_invalidate(addr, bytes, clean, inval); +} + +fa_cache_sim_t::fa_cache_sim_t(size_t ways, size_t linesz, const char* name) + : cache_sim_t(1, ways, linesz, name) +{ +} + +uint64_t* fa_cache_sim_t::check_tag(uint64_t addr) +{ + auto it = tags.find(addr >> idx_shift); + return it == tags.end() ? NULL : &it->second; +} + +uint64_t fa_cache_sim_t::victimize(uint64_t addr) +{ + uint64_t old_tag = 0; + if (tags.size() == ways) + { + auto it = tags.begin(); + std::advance(it, lfsr.next() % ways); + old_tag = it->second; + tags.erase(it); + } + tags[addr >> idx_shift] = (addr >> idx_shift) | VALID; + return old_tag; +} diff --git a/vendor/riscv-isa-sim/riscv/cachesim.h b/vendor/riscv-isa-sim/riscv/cachesim.h new file mode 100644 index 00000000..b7f90143 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/cachesim.h @@ -0,0 +1,135 @@ +// See LICENSE for license details. 
+
+#ifndef _RISCV_CACHE_SIM_H
+#define _RISCV_CACHE_SIM_H
+
+#include "memtracer.h"
+#include <cstring>
+#include <string>
+#include <map>
+#include <cstdint>
+
+class lfsr_t
+{
+ public:
+  lfsr_t() : reg(1) {}
+  lfsr_t(const lfsr_t& lfsr) : reg(lfsr.reg) {}
+  uint32_t next() { return reg = (reg>>1)^(-(reg&1) & 0xd0000001); }
+ private:
+  uint32_t reg;
+};
+
+class cache_sim_t
+{
+ public:
+  cache_sim_t(size_t sets, size_t ways, size_t linesz, const char* name);
+  cache_sim_t(const cache_sim_t& rhs);
+  virtual ~cache_sim_t();
+
+  void access(uint64_t addr, size_t bytes, bool store);
+  void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval);
+  void print_stats();
+  void set_miss_handler(cache_sim_t* mh) { miss_handler = mh; }
+  void set_log(bool _log) { log = _log; }
+
+  static cache_sim_t* construct(const char* config, const char* name);
+
+ protected:
+  static const uint64_t VALID = 1ULL << 63;
+  static const uint64_t DIRTY = 1ULL << 62;
+
+  virtual uint64_t* check_tag(uint64_t addr);
+  virtual uint64_t victimize(uint64_t addr);
+
+  lfsr_t lfsr;
+  cache_sim_t* miss_handler;
+
+  size_t sets;
+  size_t ways;
+  size_t linesz;
+  size_t idx_shift;
+
+  uint64_t* tags;
+
+  uint64_t read_accesses;
+  uint64_t read_misses;
+  uint64_t bytes_read;
+  uint64_t write_accesses;
+  uint64_t write_misses;
+  uint64_t bytes_written;
+  uint64_t writebacks;
+
+  std::string name;
+  bool log;
+
+  void init();
+};
+
+class fa_cache_sim_t : public cache_sim_t
+{
+ public:
+  fa_cache_sim_t(size_t ways, size_t linesz, const char* name);
+  uint64_t* check_tag(uint64_t addr);
+  uint64_t victimize(uint64_t addr);
+ private:
+  static bool cmp(uint64_t a, uint64_t b);
+  std::map<uint64_t, uint64_t> tags;
+};
+
+class cache_memtracer_t : public memtracer_t
+{
+ public:
+  cache_memtracer_t(const char* config, const char* name)
+  {
+    cache = cache_sim_t::construct(config, name);
+  }
+  ~cache_memtracer_t()
+  {
+    delete cache;
+  }
+  void set_miss_handler(cache_sim_t* mh)
+  {
+    cache->set_miss_handler(mh);
+  }
+  void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval)
+  {
+    cache->clean_invalidate(addr, bytes, clean, inval);
+  }
+  void set_log(bool log)
+  {
+    cache->set_log(log);
+  }
+
+ protected:
+  cache_sim_t* cache;
+};
+
+class icache_sim_t : public cache_memtracer_t
+{
+ public:
+  icache_sim_t(const char* config) : cache_memtracer_t(config, "I$") {}
+  bool interested_in_range(uint64_t begin, uint64_t end, access_type type)
+  {
+    return type == FETCH;
+  }
+  void trace(uint64_t addr, size_t bytes, access_type type)
+  {
+    if (type == FETCH) cache->access(addr, bytes, false);
+  }
+};
+
+class dcache_sim_t : public cache_memtracer_t
+{
+ public:
+  dcache_sim_t(const char* config) : cache_memtracer_t(config, "D$") {}
+  bool interested_in_range(uint64_t begin, uint64_t end, access_type type)
+  {
+    return type == LOAD || type == STORE;
+  }
+  void trace(uint64_t addr, size_t bytes, access_type type)
+  {
+    if (type == LOAD || type == STORE) cache->access(addr, bytes, type == STORE);
+  }
+};
+
+#endif
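check_tag() in cachesim.cc splits an address into a set index, (addr >> idx_shift) & (sets-1), and a tag, (addr >> idx_shift) | VALID, where idx_shift is log2 of the line size. The standalone editorial sketch below works one example through that split; the 64-set, 64-byte-line configuration is an arbitrary assumption for illustration.

```cpp
#include <cstdint>
#include <cstdio>

static const uint64_t VALID = 1ULL << 63;  // same bit as cache_sim_t::VALID

int main()
{
  uint64_t sets = 64, linesz = 64, idx_shift = 6;  // idx_shift = log2(linesz)
  uint64_t addr = 0x80002040;

  uint64_t offset = addr & (linesz - 1);           // byte within the line
  uint64_t idx = (addr >> idx_shift) & (sets - 1); // which set
  uint64_t tag = (addr >> idx_shift) | VALID;      // stored line identity

  printf("addr 0x%llx -> set %llu, offset %llu, tag 0x%llx\n",
         (unsigned long long)addr, (unsigned long long)idx,
         (unsigned long long)offset, (unsigned long long)tag);
  return 0;
}
```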
diff --git a/vendor/riscv-isa-sim/riscv/cfg.h b/vendor/riscv-isa-sim/riscv/cfg.h
new file mode 100644
index 00000000..6369bd84
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/cfg.h
@@ -0,0 +1,88 @@
+// See LICENSE for license details.
+#ifndef _RISCV_CFG_H
+#define _RISCV_CFG_H
+
+#include <optional>
+#include "decode.h"
+#include "mmu.h"
+#include <vector>
+
+template <typename T>
+class cfg_arg_t {
+public:
+  cfg_arg_t(T default_val)
+    : value(default_val), was_set(false) {}
+
+  bool overridden() const { return was_set; }
+
+  T operator()() const { return value; }
+
+  T operator=(const T v) {
+    value = v;
+    was_set = true;
+    return value;
+  }
+
+private:
+  T value;
+  bool was_set;
+};
+
+// Configuration that describes a memory region
+class mem_cfg_t
+{
+public:
+  mem_cfg_t(reg_t base, reg_t size)
+    : base(base), size(size)
+  {
+    // The truth of these assertions should be ensured by whatever is creating
+    // the regions in the first place, but we have them here to make sure that
+    // we can't end up describing memory regions that don't make sense. They
+    // ask that the size is a multiple of the minimum page size, that the base
+    // is aligned to the minimum page size, that the region is non-empty and
+    // that the top address is still representable in a reg_t.
+    assert((size % PGSIZE == 0) &&
+           (base % PGSIZE == 0) &&
+           (base + size > base));
+  }
+
+  reg_t base;
+  reg_t size;
+};
+
+class cfg_t
+{
+public:
+  cfg_t(std::pair<reg_t, reg_t> default_initrd_bounds,
+        const char *default_bootargs,
+        const char *default_isa, const char *default_priv,
+        const char *default_varch,
+        const std::vector<mem_cfg_t> &default_mem_layout,
+        const std::vector<int> default_hartids,
+        bool default_real_time_clint)
+    : initrd_bounds(default_initrd_bounds),
+      bootargs(default_bootargs),
+      isa(default_isa),
+      priv(default_priv),
+      varch(default_varch),
+      mem_layout(default_mem_layout),
+      hartids(default_hartids),
+      explicit_hartids(false),
+      real_time_clint(default_real_time_clint)
+  {}
+
+  cfg_arg_t<std::pair<reg_t, reg_t>> initrd_bounds;
+  cfg_arg_t<const char *> bootargs;
+  cfg_arg_t<const char *> isa;
+  cfg_arg_t<const char *> priv;
+  cfg_arg_t<const char *> varch;
+  cfg_arg_t<std::vector<mem_cfg_t>> mem_layout;
+  std::optional<reg_t> start_pc;
+  cfg_arg_t<std::vector<int>> hartids;
+  bool explicit_hartids;
+  cfg_arg_t<bool> real_time_clint;
+
+  size_t nprocs() const { return hartids().size(); }
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/clint.cc b/vendor/riscv-isa-sim/riscv/clint.cc
new file mode 100644
index 00000000..72d1bbeb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/clint.cc
@@ -0,0 +1,89 @@
+#include <sys/time.h>
+#include "devices.h"
+#include "processor.h"
+
+clint_t::clint_t(std::vector<processor_t*>& procs, uint64_t freq_hz, bool real_time)
+  : procs(procs), freq_hz(freq_hz), real_time(real_time), mtime(0), mtimecmp(procs.size())
+{
+  struct timeval base;
+
+  gettimeofday(&base, NULL);
+
+  real_time_ref_secs = base.tv_sec;
+  real_time_ref_usecs = base.tv_usec;
+}
+
+/* 0000 msip hart 0
+ * 0004 msip hart 1
+ * 4000 mtimecmp hart 0 lo
+ * 4004 mtimecmp hart 0 hi
+ * 4008 mtimecmp hart 1 lo
+ * 400c mtimecmp hart 1 hi
+ * bff8 mtime lo
+ * bffc mtime hi
+ */
+
+#define MSIP_BASE 0x0
+#define MTIMECMP_BASE 0x4000
+#define MTIME_BASE 0xbff8
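The comment block above pins down the CLINT register map: a 4-byte msip word per hart at offset 0, an 8-byte mtimecmp per hart at 0x4000, and one shared mtime at 0xbff8. A standalone editorial sketch of the per-hart address arithmetic, assuming a two-hart system:

```cpp
#include <cstdint>
#include <cstdio>

// Offsets match the #defines above; the hart count is an assumption.
static const uint64_t MSIP_BASE     = 0x0;
static const uint64_t MTIMECMP_BASE = 0x4000;
static const uint64_t MTIME_BASE    = 0xbff8;

int main()
{
  for (unsigned hart = 0; hart < 2; hart++) {
    printf("hart %u: msip @ 0x%04llx, mtimecmp @ 0x%04llx\n", hart,
           (unsigned long long)(MSIP_BASE + 4 * hart),       // 4-byte msip
           (unsigned long long)(MTIMECMP_BASE + 8 * hart));  // 8-byte mtimecmp
  }
  printf("mtime @ 0x%04llx (shared by all harts)\n",
         (unsigned long long)MTIME_BASE);
  return 0;
}
```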
+
+bool clint_t::load(reg_t addr, size_t len, uint8_t* bytes)
+{
+  increment(0);
+  if (addr >= MSIP_BASE && addr + len <= MSIP_BASE + procs.size()*sizeof(msip_t)) {
+    std::vector<msip_t> msip(procs.size());
+    for (size_t i = 0; i < procs.size(); ++i)
+      msip[i] = !!(procs[i]->state.mip->read() & MIP_MSIP);
+    memcpy(bytes, (uint8_t*)&msip[0] + addr - MSIP_BASE, len);
+  } else if (addr >= MTIMECMP_BASE && addr + len <= MTIMECMP_BASE + procs.size()*sizeof(mtimecmp_t)) {
+    memcpy(bytes, (uint8_t*)&mtimecmp[0] + addr - MTIMECMP_BASE, len);
+  } else if (addr >= MTIME_BASE && addr + len <= MTIME_BASE + sizeof(mtime_t)) {
+    memcpy(bytes, (uint8_t*)&mtime + addr - MTIME_BASE, len);
+  } else {
+    return false;
+  }
+  return true;
+}
+
+bool clint_t::store(reg_t addr, size_t len, const uint8_t* bytes)
+{
+  if (addr >= MSIP_BASE && addr + len <= MSIP_BASE + procs.size()*sizeof(msip_t)) {
+    std::vector<msip_t> msip(procs.size());
+    std::vector<msip_t> mask(procs.size(), 0);
+    memcpy((uint8_t*)&msip[0] + addr - MSIP_BASE, bytes, len);
+    memset((uint8_t*)&mask[0] + addr - MSIP_BASE, 0xff, len);
+    for (size_t i = 0; i < procs.size(); ++i) {
+      if (!(mask[i] & 0xFF)) continue;
+      procs[i]->state.mip->backdoor_write_with_mask(MIP_MSIP, 0);
+      if (!!(msip[i] & 1))
+        procs[i]->state.mip->backdoor_write_with_mask(MIP_MSIP, MIP_MSIP);
+    }
+  } else if (addr >= MTIMECMP_BASE && addr + len <= MTIMECMP_BASE + procs.size()*sizeof(mtimecmp_t)) {
+    memcpy((uint8_t*)&mtimecmp[0] + addr - MTIMECMP_BASE, bytes, len);
+  } else if (addr >= MTIME_BASE && addr + len <= MTIME_BASE + sizeof(mtime_t)) {
+    memcpy((uint8_t*)&mtime + addr - MTIME_BASE, bytes, len);
+  } else {
+    return false;
+  }
+  increment(0);
+  return true;
+}
+
+void clint_t::increment(reg_t inc)
+{
+  if (real_time) {
+    struct timeval now;
+    uint64_t diff_usecs;
+
+    gettimeofday(&now, NULL);
+    diff_usecs = ((now.tv_sec - real_time_ref_secs) * 1000000) + (now.tv_usec - real_time_ref_usecs);
+    mtime = diff_usecs * freq_hz / 1000000;
+  } else {
+    mtime += inc;
+  }
+  for (size_t i = 0; i < procs.size(); i++) {
+    procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, 0);
+    if (mtime >= mtimecmp[i])
+      procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, MIP_MTIP);
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/common.h b/vendor/riscv-isa-sim/riscv/common.h
new file mode 100644
index 00000000..002a83f0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/common.h
@@ -0,0 +1,18 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_COMMON_H
+#define _RISCV_COMMON_H
+
+#ifdef __GNUC__
+# define likely(x) __builtin_expect(x, 1)
+# define unlikely(x) __builtin_expect(x, 0)
+# define NOINLINE __attribute__ ((noinline))
+# define NORETURN __attribute__ ((noreturn))
+#else
+# define likely(x) (x)
+# define unlikely(x) (x)
+# define NOINLINE
+# define NORETURN
+#endif
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/csrs.cc b/vendor/riscv-isa-sim/riscv/csrs.cc
new file mode 100644
index 00000000..f31022fc
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/csrs.cc
@@ -0,0 +1,1297 @@
+// See LICENSE for license details.
+
+#include "csrs.h"
+// For processor_t:
+#include "processor.h"
+#include "mmu.h"
+// For get_field():
+#include "decode.h"
+// For trap_virtual_instruction and trap_illegal_instruction:
+#include "trap.h"
+// For require():
+#include "insn_macros.h"
+
+// STATE macro used by require_privilege() macro:
+#undef STATE
+#define STATE (*state)
+
+
+// implement class csr_t
+csr_t::csr_t(processor_t* const proc, const reg_t addr):
+  proc(proc),
+  state(proc->get_state()),
+  address(addr),
+  csr_priv(get_field(addr, 0x300)),
+  csr_read_only(get_field(addr, 0xC00) == 3) {
+}
+
+void csr_t::verify_permissions(insn_t insn, bool write) const {
+  // Check permissions. Raise virtual-instruction exception if V=1,
+  // privileges are insufficient, and the CSR belongs to supervisor or
+  // hypervisor. Raise illegal-instruction exception otherwise.
+  unsigned priv = state->prv == PRV_S && !state->v ? 
PRV_HS : state->prv; + + if ((csr_priv == PRV_S && !proc->extension_enabled('S')) || + (csr_priv == PRV_HS && !proc->extension_enabled('H'))) + throw trap_illegal_instruction(insn.bits()); + + if (write && csr_read_only) + throw trap_illegal_instruction(insn.bits()); + if (priv < csr_priv) { + if (state->v && csr_priv <= PRV_HS) + throw trap_virtual_instruction(insn.bits()); + throw trap_illegal_instruction(insn.bits()); + } +} + + +csr_t::~csr_t() { +} + +void csr_t::write(const reg_t val) noexcept { + const bool success = unlogged_write(val); + if (success) { + log_write(); + } +} + +void csr_t::log_write() const noexcept { + log_special_write(address, written_value()); +} + +void csr_t::log_special_write(const reg_t address, const reg_t val) const noexcept { +#if defined(RISCV_ENABLE_COMMITLOG) + proc->get_state()->log_reg_write[((address) << 4) | 4] = {val, 0}; +#endif +} + +reg_t csr_t::written_value() const noexcept { + return read(); +} + +// implement class basic_csr_t +basic_csr_t::basic_csr_t(processor_t* const proc, const reg_t addr, const reg_t init): + csr_t(proc, addr), + val(init) { +} + +bool basic_csr_t::unlogged_write(const reg_t val) noexcept { + this->val = val; + return true; +} + + +// implement class pmpaddr_csr_t +pmpaddr_csr_t::pmpaddr_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0), + cfg(0), + pmpidx(address - CSR_PMPADDR0) { +} + + +void pmpaddr_csr_t::verify_permissions(insn_t insn, bool write) const { + csr_t::verify_permissions(insn, write); + // If n_pmp is zero, that means pmp is not implemented hence raise + // trap if it tries to access the csr. I would prefer to implement + // this by not instantiating any pmpaddr_csr_t for these regs, but + // n_pmp can change after reset() is run. + if (proc->n_pmp == 0) + throw trap_illegal_instruction(insn.bits()); +} + + +reg_t pmpaddr_csr_t::read() const noexcept { + if ((cfg & PMP_A) >= PMP_NAPOT) + return val | (~proc->pmp_tor_mask() >> 1); + return val & proc->pmp_tor_mask(); +} + + +bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept { + // If no PMPs are configured, disallow access to all. Otherwise, + // allow access to all, but unimplemented ones are hardwired to + // zero. Note that n_pmp can change after reset(); otherwise I would + // implement this in state_t::reset() by instantiating the correct + // number of pmpaddr_csr_t. 
+ if (proc->n_pmp == 0) + return false; + + bool lock_bypass = state->mseccfg->get_rlb(); + bool locked = (cfg & PMP_L) && !lock_bypass; + + if (pmpidx < proc->n_pmp && !locked && !next_locked_and_tor()) { + this->val = val & ((reg_t(1) << (MAX_PADDR_BITS - PMP_SHIFT)) - 1); + } + else + return false; + proc->get_mmu()->flush_tlb(); + return true; +} + +bool pmpaddr_csr_t::next_locked_and_tor() const noexcept { + if (pmpidx+1 >= state->max_pmp) return false; // this is the last entry + bool lock_bypass = state->mseccfg->get_rlb(); + bool next_locked = (state->pmpaddr[pmpidx+1]->cfg & PMP_L) && !lock_bypass; + bool next_tor = (state->pmpaddr[pmpidx+1]->cfg & PMP_A) == PMP_TOR; + return next_locked && next_tor; +} + + +reg_t pmpaddr_csr_t::tor_paddr() const noexcept { + return (val & proc->pmp_tor_mask()) << PMP_SHIFT; +} + + +reg_t pmpaddr_csr_t::tor_base_paddr() const noexcept { + if (pmpidx == 0) return 0; // entry 0 always uses 0 as base + return state->pmpaddr[pmpidx-1]->tor_paddr(); +} + + +reg_t pmpaddr_csr_t::napot_mask() const noexcept { + bool is_na4 = (cfg & PMP_A) == PMP_NA4; + reg_t mask = (val << 1) | (!is_na4) | ~proc->pmp_tor_mask(); + return ~(mask & ~(mask + 1)) << PMP_SHIFT; +} + + +bool pmpaddr_csr_t::match4(reg_t addr) const noexcept { + if ((cfg & PMP_A) == 0) return false; + bool is_tor = (cfg & PMP_A) == PMP_TOR; + if (is_tor) return tor_base_paddr() <= addr && addr < tor_paddr(); + // NAPOT or NA4: + return ((addr ^ tor_paddr()) & napot_mask()) == 0; +} + + +bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept { + if ((addr | len) & (len - 1)) + abort(); + reg_t base = tor_base_paddr(); + reg_t tor = tor_paddr(); + + if ((cfg & PMP_A) == 0) return false; + + bool is_tor = (cfg & PMP_A) == PMP_TOR; + bool begins_after_lower = addr >= base; + bool begins_after_upper = addr >= tor; + bool ends_before_lower = (addr & -len) < (base & -len); + bool ends_before_upper = (addr & -len) < (tor & -len); + bool tor_homogeneous = ends_before_lower || begins_after_upper || + (begins_after_lower && ends_before_upper); + + bool mask_homogeneous = ~(napot_mask() << 1) & len; + bool napot_homogeneous = mask_homogeneous || ((addr ^ tor) / len) != 0; + + return !(is_tor ? tor_homogeneous : napot_homogeneous); +} + + +bool pmpaddr_csr_t::access_ok(access_type type, reg_t mode) const noexcept { + bool cfgx = cfg & PMP_X; + bool cfgw = cfg & PMP_W; + bool cfgr = cfg & PMP_R; + bool cfgl = cfg & PMP_L; + + bool prvm = mode == PRV_M; + + bool typer = type == LOAD; + bool typex = type == FETCH; + bool typew = type == STORE; + bool normal_rwx = (typer && cfgr) || (typew && cfgw) || (typex && cfgx); + bool mseccfg_mml = state->mseccfg->get_mml(); + + if (mseccfg_mml) { + if (cfgx && cfgw && cfgr && cfgl) { + // Locked Shared data region: Read only on both M and S/U mode. + return typer; + } else { + bool mml_shared_region = !cfgr && cfgw; + bool mml_chk_normal = (prvm == cfgl) && normal_rwx; + bool mml_chk_shared = + (!cfgl && cfgx && (typer || typew)) || + (!cfgl && !cfgx && (typer || (typew && prvm))) || + (cfgl && typex) || + (cfgl && typer && cfgx && prvm); + return mml_shared_region ? 
mml_chk_shared : mml_chk_normal; + } + } else { + bool m_bypass = (prvm && !cfgl); + return m_bypass || normal_rwx; + } +} + + +// implement class pmpcfg_csr_t +pmpcfg_csr_t::pmpcfg_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +reg_t pmpcfg_csr_t::read() const noexcept { + reg_t cfg_res = 0; + for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8 && i < state->max_pmp; i++) + cfg_res |= reg_t(state->pmpaddr[i]->cfg) << (8 * (i - i0)); + return cfg_res; +} + +bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->n_pmp == 0) + return false; + + bool write_success = false; + bool rlb = state->mseccfg->get_rlb(); + bool mml = state->mseccfg->get_mml(); + for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8; i++) { + if (i < proc->n_pmp) { + bool locked = (state->pmpaddr[i]->cfg & PMP_L); + bool next_locked = (i+1 < proc->n_pmp) && (state->pmpaddr[i+1]->cfg & PMP_L); + bool next_tor = (i+1 < proc->n_pmp) && (state->pmpaddr[i+1]->cfg & PMP_A) == PMP_TOR; + + if (rlb || (!locked && !(next_locked && next_tor))) { + uint8_t cfg = (val >> (8 * (i - i0))) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L); + // Drop R=0 W=1 when MML = 0 + // Remove the restriction when MML = 1 + if (!mml) { + cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0); + } + // Disallow A=NA4 when granularity > 4 + if (proc->lg_pmp_granularity != PMP_SHIFT && (cfg & PMP_A) == PMP_NA4) + cfg |= PMP_NAPOT; + /* + Adding a rule with executable privileges that either is M-mode-only or a locked Shared-Region is not possible + and such pmpcfg writes are ignored, leaving pmpcfg unchanged. + This restriction can be temporarily lifted e.g. during the boot process, by setting mseccfg.RLB. + */ + if (rlb || !(mml && ((cfg & PMP_L) && ((cfg & PMP_X) || ((cfg & PMP_W) && !(cfg & PMP_R)))))) + state->pmpaddr[i]->cfg = cfg; + } + + write_success = true; + + state->mseccfg->pmplock_recorded &= ~(1ULL << i); + if (state->pmpaddr[i]->cfg & PMP_L) { + state->mseccfg->pmplock_recorded |= (1ULL << i); + } + } + } + proc->get_mmu()->flush_tlb(); + return write_success; +} + +// implement class mseccfg_csr_t +mseccfg_csr_t::mseccfg_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + mseccfg_val(0), + pmplock_recorded(0) { +} + +bool mseccfg_csr_t::get_mml() const noexcept { + return (mseccfg_val & MSECCFG_MML); +} + +bool mseccfg_csr_t::get_mmwp() const noexcept { + return (mseccfg_val & MSECCFG_MMWP); +} + +bool mseccfg_csr_t::get_rlb() const noexcept { + return (mseccfg_val & MSECCFG_RLB); +} + +reg_t mseccfg_csr_t::read() const noexcept { + return mseccfg_val; +} + +bool mseccfg_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->n_pmp == 0) + return false; + + //When mseccfg.RLB is 0 and pmpcfg.L is 1 in any rule or entry (including disabled entries) + if (!(pmplock_recorded && (mseccfg_val & MSECCFG_RLB)==0)) { + mseccfg_val &= ~MSECCFG_RLB; + mseccfg_val |= (val & MSECCFG_RLB); + } + + mseccfg_val |= (val & MSECCFG_MMWP); //MMWP is sticky + mseccfg_val |= (val & MSECCFG_MML); //MML is sticky + + proc->get_mmu()->flush_tlb(); + + return true; +} + +// implement class virtualized_csr_t +virtualized_csr_t::virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt): + csr_t(proc, orig->address), + orig_csr(orig), + virt_csr(virt) { +} + + +reg_t virtualized_csr_t::read() const noexcept { + return readvirt(state->v); +} + +reg_t virtualized_csr_t::readvirt(bool virt) const noexcept { + return virt ? 
virt_csr->read() : orig_csr->read(); +} + +bool virtualized_csr_t::unlogged_write(const reg_t val) noexcept { + if (state->v) + virt_csr->write(val); + else + orig_csr->write(val); + return false; // virt_csr or orig_csr has already logged +} + + +// implement class epc_csr_t +epc_csr_t::epc_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + + +reg_t epc_csr_t::read() const noexcept { + return val & proc->pc_alignment_mask(); +} + + +bool epc_csr_t::unlogged_write(const reg_t val) noexcept { + this->val = val & ~(reg_t)1; + return true; +} + + +// implement class tvec_csr_t +tvec_csr_t::tvec_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + + +reg_t tvec_csr_t::read() const noexcept { + return val; +} + + +bool tvec_csr_t::unlogged_write(const reg_t val) noexcept { + this->val = val & ~(reg_t)2; + return true; +} + + +// implement class cause_csr_t +cause_csr_t::cause_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + + +reg_t cause_csr_t::read() const noexcept { + reg_t val = basic_csr_t::read(); + // When reading, the interrupt bit needs to adjust to xlen. Spike does + // not generally support dynamic xlen, but this code was (partly) + // there since at least 2015 (ea58df8 and c4350ef). + if (proc->get_isa().get_max_xlen() > proc->get_xlen()) // Move interrupt bit to top of xlen + return val | ((val >> (proc->get_isa().get_max_xlen()-1)) << (proc->get_xlen()-1)); + return val; +} + + +// implement class base_status_csr_t +base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + has_page(proc->extension_enabled_const('S') && proc->supports_impl(IMPL_MMU)), + sstatus_write_mask(compute_sstatus_write_mask()), + sstatus_read_mask(sstatus_write_mask | SSTATUS_UBE | SSTATUS_UXL + | (proc->get_const_xlen() == 32 ? SSTATUS32_SD : SSTATUS64_SD)) { +} + + +reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept { + // If a configuration has FS bits, they will always be accessible no + // matter the state of misa. + const bool has_fs = proc->extension_enabled('S') || proc->extension_enabled('F') + || proc->extension_enabled('V'); + const bool has_vs = proc->extension_enabled('V'); + return 0 + | (proc->extension_enabled('S') ? (SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_SPP) : 0) + | (has_page ? (SSTATUS_SUM | SSTATUS_MXR) : 0) + | (has_fs ? SSTATUS_FS : 0) + | (proc->any_custom_extensions() ? SSTATUS_XS : 0) + | (has_vs ? SSTATUS_VS : 0) + ; +} + + +reg_t base_status_csr_t::adjust_sd(const reg_t val) const noexcept { + // This uses get_const_xlen() instead of get_xlen() not only because + // the variable is static, so it's only called once, but also + // because the SD bit moves when XLEN changes, which means we would + // need to call adjust_sd() on every read, instead of on every + // write. + static const reg_t sd_bit = proc->get_const_xlen() == 64 ? SSTATUS64_SD : SSTATUS32_SD; + if (((val & SSTATUS_FS) == SSTATUS_FS) || + ((val & SSTATUS_VS) == SSTATUS_VS) || + ((val & SSTATUS_XS) == SSTATUS_XS)) { + return val | sd_bit; + } + return val & ~sd_bit; +} + + +void base_status_csr_t::maybe_flush_tlb(const reg_t newval) noexcept { + if ((newval ^ read()) & + (MSTATUS_MPP | MSTATUS_MPRV + | (has_page ? 
(MSTATUS_MXR | MSTATUS_SUM) : 0) + )) + proc->get_mmu()->flush_tlb(); +} + + +namespace { + int xlen_to_uxl(int xlen) { + if (xlen == 32) + return 1; + if (xlen == 64) + return 2; + abort(); + } +} + + +// implement class vsstatus_csr_t +vsstatus_csr_t::vsstatus_csr_t(processor_t* const proc, const reg_t addr): + base_status_csr_t(proc, addr), + val(proc->get_state()->mstatus->read() & sstatus_read_mask) { +} + +bool vsstatus_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t newval = (this->val & ~sstatus_write_mask) | (val & sstatus_write_mask); + if (state->v) maybe_flush_tlb(newval); + this->val = adjust_sd(newval); + return true; +} + + +// implement class sstatus_proxy_csr_t +sstatus_proxy_csr_t::sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus): + base_status_csr_t(proc, addr), + mstatus(mstatus) { +} + +bool sstatus_proxy_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t new_mstatus = (mstatus->read() & ~sstatus_write_mask) | (val & sstatus_write_mask); + + mstatus->write(new_mstatus); + return false; // avoid double logging: already logged by mstatus->write() +} + + +// implement class mstatus_csr_t +mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr): + base_status_csr_t(proc, addr), + val(0 + | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0) + | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0) + +#ifdef RISCV_ENABLE_DUAL_ENDIAN + | (proc->get_mmu()->is_target_big_endian() ? MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0) +#endif + | 0 // initial value for mstatus + ) { +} + + +bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { + const bool has_mpv = proc->extension_enabled('S') && proc->extension_enabled('H'); + const bool has_gva = has_mpv; + + const reg_t mask = sstatus_write_mask + | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_MPRV + | MSTATUS_MPP | MSTATUS_TW + | (proc->extension_enabled('S') ? MSTATUS_TSR : 0) + | (has_page ? MSTATUS_TVM : 0) + | (has_gva ? MSTATUS_GVA : 0) + | (has_mpv ? MSTATUS_MPV : 0); + + const reg_t requested_mpp = proc->legalize_privilege(get_field(val, MSTATUS_MPP)); + const reg_t adjusted_val = set_field(val, MSTATUS_MPP, requested_mpp); + const reg_t new_mstatus = (read() & ~mask) | (adjusted_val & mask); + maybe_flush_tlb(new_mstatus); + this->val = adjust_sd(new_mstatus); + return true; +} + +// implement class mstatush_csr_t +mstatush_csr_t::mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus): + csr_t(proc, addr), + mstatus(mstatus), + mask(MSTATUSH_MPV | MSTATUSH_GVA | MSTATUSH_SBE | MSTATUSH_MBE) { +} + +reg_t mstatush_csr_t::read() const noexcept { + return (mstatus->read() >> 32) & mask; +} + +bool mstatush_csr_t::unlogged_write(const reg_t val) noexcept { + return mstatus->unlogged_write((mstatus->written_value() & ~(mask << 32)) | ((val & mask) << 32)); +} + +// implement class sstatus_csr_t +sstatus_csr_t::sstatus_csr_t(processor_t* const proc, sstatus_proxy_csr_t_p orig, vsstatus_csr_t_p virt): + virtualized_csr_t(proc, orig, virt), + orig_sstatus(orig), + virt_sstatus(virt) { +} + +void sstatus_csr_t::dirty(const reg_t dirties) { + // As an optimization, return early if already dirty. 
+ if ((orig_sstatus->read() & dirties) == dirties) { + if (likely(!state->v || (virt_sstatus->read() & dirties) == dirties)) + return; + } + + // Catch problems like #823 where P-extension instructions were not + // checking for mstatus.VS!=Off: + if (!enabled(dirties)) abort(); + + orig_sstatus->write(orig_sstatus->read() | dirties); + if (state->v) { + virt_sstatus->write(virt_sstatus->read() | dirties); + } +} + +bool sstatus_csr_t::enabled(const reg_t which) { + if ((orig_sstatus->read() & which) != 0) { + if (!state->v || (virt_sstatus->read() & which) != 0) + return true; + } + + // If the field doesn't exist, it is always enabled. See #823. + if (!orig_sstatus->field_exists(which)) + return true; + + return false; +} + + +// implement class misa_csr_t +misa_csr_t::misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa): + basic_csr_t(proc, addr, max_isa), + max_isa(max_isa), + write_mask(max_isa & (0 // allow MAFDQCHV bits in MISA to be modified + | (1L << ('M' - 'A')) + | (1L << ('A' - 'A')) + | (1L << ('F' - 'A')) + | (1L << ('D' - 'A')) + | (1L << ('Q' - 'A')) + | (1L << ('C' - 'A')) + | (1L << ('H' - 'A')) + | (1L << ('V' - 'A')) + ) + ) { +} + +const reg_t misa_csr_t::dependency(const reg_t val, const char feature, const char depends_on) const noexcept { + return (val & (1L << (depends_on - 'A'))) ? val : (val & ~(1L << (feature - 'A'))); +} + +bool misa_csr_t::unlogged_write(const reg_t val) noexcept { + // the write is ignored if increasing IALIGN would misalign the PC + if (!(val & (1L << ('C' - 'A'))) && (state->pc & 2)) + return false; + + reg_t adjusted_val = val; + adjusted_val = dependency(adjusted_val, 'D', 'F'); + adjusted_val = dependency(adjusted_val, 'Q', 'D'); + adjusted_val = dependency(adjusted_val, 'V', 'D'); + + const reg_t old_misa = read(); + const bool prev_h = old_misa & (1L << ('H' - 'A')); + const reg_t new_misa = (adjusted_val & write_mask) | (old_misa & ~write_mask); + const bool new_h = new_misa & (1L << ('H' - 'A')); + + // update the hypervisor-only bits in MEDELEG and other CSRs + if (!new_h && prev_h) { + reg_t hypervisor_exceptions = 0 + | (1 << CAUSE_VIRTUAL_SUPERVISOR_ECALL) + | (1 << CAUSE_FETCH_GUEST_PAGE_FAULT) + | (1 << CAUSE_LOAD_GUEST_PAGE_FAULT) + | (1 << CAUSE_VIRTUAL_INSTRUCTION) + | (1 << CAUSE_STORE_GUEST_PAGE_FAULT) + ; + state->medeleg->write(state->medeleg->read() & ~hypervisor_exceptions); + state->mstatus->write(state->mstatus->read() & ~(MSTATUS_GVA | MSTATUS_MPV)); + state->mie->write_with_mask(MIP_HS_MASK, 0); // also takes care of hie, sie + state->mip->write_with_mask(MIP_HS_MASK, 0); // also takes care of hip, sip, hvip + state->hstatus->write(0); + } + + return basic_csr_t::unlogged_write(new_misa); +} + +bool misa_csr_t::extension_enabled_const(unsigned char ext) const noexcept { + assert(!(1 & (write_mask >> (ext - 'A')))); + return extension_enabled(ext); +} + + +// implement class mip_or_mie_csr_t +mip_or_mie_csr_t::mip_or_mie_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + +reg_t mip_or_mie_csr_t::read() const noexcept { + return val; +} + +void mip_or_mie_csr_t::write_with_mask(const reg_t mask, const reg_t val) noexcept { + this->val = (this->val & ~mask) | (val & mask); + log_write(); +} + +bool mip_or_mie_csr_t::unlogged_write(const reg_t val) noexcept { + write_with_mask(write_mask(), val); + return false; // avoid double logging: already logged by write_with_mask() +} + + +mip_csr_t::mip_csr_t(processor_t* const proc, const reg_t addr): + 
mip_or_mie_csr_t(proc, addr) { +} + +void mip_csr_t::backdoor_write_with_mask(const reg_t mask, const reg_t val) noexcept { + this->val = (this->val & ~mask) | (val & mask); +} + +reg_t mip_csr_t::write_mask() const noexcept { + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t vssip_int = proc->extension_enabled('H') ? MIP_VSSIP : 0; + const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0; + // We must mask off sgeip, vstip, and vseip. All three of these + // bits are aliases for the same bits in hip. The hip spec says: + // * sgeip is read-only -- write hgeip instead + // * vseip is read-only -- write hvip instead + // * vstip is read-only -- write hvip instead + return (supervisor_ints | hypervisor_ints) & + (MIP_SEIP | MIP_SSIP | MIP_STIP | vssip_int); +} + + +mie_csr_t::mie_csr_t(processor_t* const proc, const reg_t addr): + mip_or_mie_csr_t(proc, addr) { +} + + +reg_t mie_csr_t::write_mask() const noexcept { + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0; + const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP; + const reg_t delegable_ints = supervisor_ints | coprocessor_ints; + const reg_t all_ints = delegable_ints | hypervisor_ints | MIP_MSIP | MIP_MTIP | MIP_MEIP; + return all_ints; +} + + +// implement class generic_int_accessor_t +generic_int_accessor_t::generic_int_accessor_t(state_t* const state, + const reg_t read_mask, + const reg_t ip_write_mask, + const reg_t ie_write_mask, + const mask_mode_t mask_mode, + const int shiftamt): + state(state), + read_mask(read_mask), + ip_write_mask(ip_write_mask), + ie_write_mask(ie_write_mask), + mask_mideleg(mask_mode == MIDELEG), + mask_hideleg(mask_mode == HIDELEG), + shiftamt(shiftamt) { +} + +reg_t generic_int_accessor_t::ip_read() const noexcept { + return (state->mip->read() & deleg_mask() & read_mask) >> shiftamt; +} + +void generic_int_accessor_t::ip_write(const reg_t val) noexcept { + const reg_t mask = deleg_mask() & ip_write_mask; + state->mip->write_with_mask(mask, val << shiftamt); +} + +reg_t generic_int_accessor_t::ie_read() const noexcept { + return (state->mie->read() & deleg_mask() & read_mask) >> shiftamt; +} + +void generic_int_accessor_t::ie_write(const reg_t val) noexcept { + const reg_t mask = deleg_mask() & ie_write_mask; + state->mie->write_with_mask(mask, val << shiftamt); +} + +reg_t generic_int_accessor_t::deleg_mask() const { + const reg_t hideleg_mask = mask_hideleg ? state->hideleg->read() : (reg_t)~0; + const reg_t mideleg_mask = mask_mideleg ? 
state->mideleg->read() : (reg_t)~0; + return hideleg_mask & mideleg_mask; +} + + +// implement class mip_proxy_csr_t +mip_proxy_csr_t::mip_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr): + csr_t(proc, addr), + accr(accr) { +} + +reg_t mip_proxy_csr_t::read() const noexcept { + return accr->ip_read(); +} + +bool mip_proxy_csr_t::unlogged_write(const reg_t val) noexcept { + accr->ip_write(val); + return false; // accr has already logged +} + +// implement class mie_proxy_csr_t +mie_proxy_csr_t::mie_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr): + csr_t(proc, addr), + accr(accr) { +} + +reg_t mie_proxy_csr_t::read() const noexcept { + return accr->ie_read(); +} + +bool mie_proxy_csr_t::unlogged_write(const reg_t val) noexcept { + accr->ie_write(val); + return false; // accr has already logged +} + + +// implement class mideleg_csr_t +mideleg_csr_t::mideleg_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +reg_t mideleg_csr_t::read() const noexcept { + reg_t val = basic_csr_t::read(); + if (proc->extension_enabled('H')) return val | MIDELEG_FORCED_MASK; + // No need to clear MIDELEG_FORCED_MASK because those bits can never + // get set in val. + return val; +} + +void mideleg_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!proc->extension_enabled('S')) + throw trap_illegal_instruction(insn.bits()); +} + +bool mideleg_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP; + const reg_t delegable_ints = supervisor_ints | coprocessor_ints; + + return basic_csr_t::unlogged_write(val & delegable_ints); +} + + +// implement class medeleg_csr_t +medeleg_csr_t::medeleg_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0), + hypervisor_exceptions(0 + | (1 << CAUSE_VIRTUAL_SUPERVISOR_ECALL) + | (1 << CAUSE_FETCH_GUEST_PAGE_FAULT) + | (1 << CAUSE_LOAD_GUEST_PAGE_FAULT) + | (1 << CAUSE_VIRTUAL_INSTRUCTION) + | (1 << CAUSE_STORE_GUEST_PAGE_FAULT) + ) { +} + +void medeleg_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!proc->extension_enabled('S')) + throw trap_illegal_instruction(insn.bits()); +} + +bool medeleg_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t mask = 0 + | (1 << CAUSE_MISALIGNED_FETCH) + | (1 << CAUSE_BREAKPOINT) + | (1 << CAUSE_USER_ECALL) + | (1 << CAUSE_SUPERVISOR_ECALL) + | (1 << CAUSE_FETCH_PAGE_FAULT) + | (1 << CAUSE_LOAD_PAGE_FAULT) + | (1 << CAUSE_STORE_PAGE_FAULT) + | (proc->extension_enabled('H') ? hypervisor_exceptions : 0) + ; + return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); +} + + +// implement class masked_csr_t +masked_csr_t::masked_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): + basic_csr_t(proc, addr, init), + mask(mask) { +} + +bool masked_csr_t::unlogged_write(const reg_t val) noexcept { + return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); +} + + +// implement class base_atp_csr_t and family +base_atp_csr_t::base_atp_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + + +bool base_atp_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t newval = proc->supports_impl(IMPL_MMU) ? 
compute_new_satp(val) : 0; + if (newval != read()) + proc->get_mmu()->flush_tlb(); + return basic_csr_t::unlogged_write(newval); +} + +bool base_atp_csr_t::satp_valid(reg_t val) const noexcept { + if (proc->get_xlen() == 32) { + switch (get_field(val, SATP32_MODE)) { + case SATP_MODE_SV32: return proc->supports_impl(IMPL_MMU_SV32); + case SATP_MODE_OFF: return true; + default: return false; + } + } else { + switch (get_field(val, SATP64_MODE)) { + case SATP_MODE_SV39: return proc->supports_impl(IMPL_MMU_SV39); + case SATP_MODE_SV48: return proc->supports_impl(IMPL_MMU_SV48); + case SATP_MODE_SV57: return proc->supports_impl(IMPL_MMU_SV57); + case SATP_MODE_OFF: return true; + default: return false; + } + } +} + +reg_t base_atp_csr_t::compute_new_satp(reg_t val) const noexcept { + reg_t rv64_ppn_mask = (reg_t(1) << (MAX_PADDR_BITS - PGSHIFT)) - 1; + + reg_t mode_mask = proc->get_xlen() == 32 ? SATP32_MODE : SATP64_MODE; + reg_t asid_mask_if_enabled = proc->get_xlen() == 32 ? SATP32_ASID : SATP64_ASID; + reg_t asid_mask = proc->supports_impl(IMPL_MMU_ASID) ? asid_mask_if_enabled : 0; + reg_t ppn_mask = proc->get_xlen() == 32 ? SATP32_PPN : SATP64_PPN & rv64_ppn_mask; + reg_t new_mask = (satp_valid(val) ? mode_mask : 0) | asid_mask | ppn_mask; + reg_t old_mask = satp_valid(val) ? 0 : mode_mask; + + return (new_mask & val) | (old_mask & read()); +} + +satp_csr_t::satp_csr_t(processor_t* const proc, const reg_t addr): + base_atp_csr_t(proc, addr) { +} + +void satp_csr_t::verify_permissions(insn_t insn, bool write) const { + base_atp_csr_t::verify_permissions(insn, write); + if (get_field(state->mstatus->read(), MSTATUS_TVM)) + require(state->prv >= PRV_M); +} + +virtualized_satp_csr_t::virtualized_satp_csr_t(processor_t* const proc, satp_csr_t_p orig, csr_t_p virt): + virtualized_csr_t(proc, orig, virt), + orig_satp(orig) { +} + +void virtualized_satp_csr_t::verify_permissions(insn_t insn, bool write) const { + virtualized_csr_t::verify_permissions(insn, write); + + // If satp is accessed from VS mode, it's really accessing vsatp, + // and the hstatus.VTVM bit controls. + if (state->v) { + if (get_field(state->hstatus->read(), HSTATUS_VTVM)) + throw trap_virtual_instruction(insn.bits()); + } + else { + orig_csr->verify_permissions(insn, write); + } +} + +bool virtualized_satp_csr_t::unlogged_write(const reg_t val) noexcept { + // If unsupported Mode field: no change to contents + const reg_t newval = orig_satp->satp_valid(val) ? val : read(); + return virtualized_csr_t::unlogged_write(newval); +} + + +// implement class wide_counter_csr_t +wide_counter_csr_t::wide_counter_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + +reg_t wide_counter_csr_t::read() const noexcept { + return val; +} + +void wide_counter_csr_t::bump(const reg_t howmuch) noexcept { + val += howmuch; // to keep log reasonable size, don't log every bump +} + +bool wide_counter_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->get_xlen() == 32) + this->val = (this->val >> 32 << 32) | (val & 0xffffffffU); + else + this->val = val; + // The ISA mandates that if an instruction writes instret, the write + // takes precedence over the increment to instret. However, Spike + // unconditionally increments instret after executing an instruction. + // Correct for this artifact by decrementing instret here. 
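+  // (Concretely: after a `csrw minstret, t0` retires, minstret must read
+  // back exactly the value of t0, not t0 + 1; the decrement below cancels
+  // the unconditional post-instruction bump.)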
+  this->val--;
+  return true;
+}
+
+reg_t wide_counter_csr_t::written_value() const noexcept {
+  // Re-adjust for upcoming bump()
+  return this->val + 1;
+}
+
+void wide_counter_csr_t::write_upper_half(const reg_t val) noexcept {
+  this->val = (val << 32) | (this->val << 32 >> 32);
+  this->val--; // See comment above.
+  // Log upper half only.
+  log_special_write(address + (CSR_MINSTRETH - CSR_MINSTRET), written_value() >> 32);
+}
+
+
+counter_top_csr_t::counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent):
+  csr_t(proc, addr),
+  parent(parent) {
+}
+
+reg_t counter_top_csr_t::read() const noexcept {
+  return parent->read() >> 32;
+}
+
+bool counter_top_csr_t::unlogged_write(const reg_t val) noexcept {
+  parent->write_upper_half(val);
+  return true;
+}
+
+
+proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate):
+  csr_t(proc, addr),
+  delegate(delegate) {
+}
+
+reg_t proxy_csr_t::read() const noexcept {
+  return delegate->read();
+}
+
+bool proxy_csr_t::unlogged_write(const reg_t val) noexcept {
+  delegate->write(val); // log only under the original (delegate's) name
+  return false;
+}
+
+
+const_csr_t::const_csr_t(processor_t* const proc, const reg_t addr, reg_t val):
+  csr_t(proc, addr),
+  val(val) {
+}
+
+reg_t const_csr_t::read() const noexcept {
+  return val;
+}
+
+bool const_csr_t::unlogged_write(const reg_t val) noexcept {
+  return false;
+}
+
+
+counter_proxy_csr_t::counter_proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate):
+  proxy_csr_t(proc, addr, delegate) {
+}
+
+bool counter_proxy_csr_t::myenable(csr_t_p counteren) const noexcept {
+  return 1 & (counteren->read() >> (address & 31));
+}
+
+void counter_proxy_csr_t::verify_permissions(insn_t insn, bool write) const {
+  const bool mctr_ok = (state->prv < PRV_M) ? myenable(state->mcounteren) : true;
+  const bool hctr_ok = state->v ? myenable(state->hcounteren) : true;
+  const bool sctr_ok = (proc->extension_enabled('S') && state->prv < PRV_S) ? myenable(state->scounteren) : true;
+
+  if (write || !mctr_ok)
+    throw trap_illegal_instruction(insn.bits());
+  if (!hctr_ok)
+    throw trap_virtual_instruction(insn.bits());
+  if (!sctr_ok) {
+    if (state->v)
+      throw trap_virtual_instruction(insn.bits());
+    else
+      throw trap_illegal_instruction(insn.bits());
+  }
+}
+
+
+hypervisor_csr_t::hypervisor_csr_t(processor_t* const proc, const reg_t addr):
+  basic_csr_t(proc, addr, 0) {
+}
+
+void hypervisor_csr_t::verify_permissions(insn_t insn, bool write) const {
+  basic_csr_t::verify_permissions(insn, write);
+  if (!proc->extension_enabled('H'))
+    throw trap_illegal_instruction(insn.bits());
+}
+
+
+hideleg_csr_t::hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg):
+  masked_csr_t(proc, addr, MIP_VS_MASK, 0),
+  mideleg(mideleg) {
+}
+
+reg_t hideleg_csr_t::read() const noexcept {
+  return masked_csr_t::read() & mideleg->read();
+}
+
+
+hgatp_csr_t::hgatp_csr_t(processor_t* const proc, const reg_t addr):
+  basic_csr_t(proc, addr, 0) {
+}
+
+void hgatp_csr_t::verify_permissions(insn_t insn, bool write) const {
+  basic_csr_t::verify_permissions(insn, write);
+  if (!state->v && get_field(state->mstatus->read(), MSTATUS_TVM))
+    require_privilege(PRV_M);
+}
+
+bool hgatp_csr_t::unlogged_write(const reg_t val) noexcept {
+  proc->get_mmu()->flush_tlb();
+
+  reg_t mask;
+  if (proc->get_const_xlen() == 32) {
+    mask = HGATP32_PPN |
+           HGATP32_MODE |
+           (proc->supports_impl(IMPL_MMU_VMID) ? HGATP32_VMID : 0);
+  } else {
+    mask = (HGATP64_PPN & ((reg_t(1) << (MAX_PADDR_BITS - PGSHIFT)) - 1)) |
+           (proc->supports_impl(IMPL_MMU_VMID) ? HGATP64_VMID : 0);
+
+    if (get_field(val, HGATP64_MODE) == HGATP_MODE_OFF ||
+        (proc->supports_impl(IMPL_MMU_SV39) && get_field(val, HGATP64_MODE) == HGATP_MODE_SV39X4) ||
+        (proc->supports_impl(IMPL_MMU_SV48) && get_field(val, HGATP64_MODE) == HGATP_MODE_SV48X4) ||
+        (proc->supports_impl(IMPL_MMU_SV57) && get_field(val, HGATP64_MODE) == HGATP_MODE_SV57X4))
+      mask |= HGATP64_MODE;
+  }
+  mask &= ~(reg_t)3;
+  return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask));
+}
+
+
+tselect_csr_t::tselect_csr_t(processor_t* const proc, const reg_t addr):
+  basic_csr_t(proc, addr, 0) {
+}
+
+bool tselect_csr_t::unlogged_write(const reg_t val) noexcept {
+  return basic_csr_t::unlogged_write((val < proc->TM.count()) ? val : read());
+}
+
+
+tdata1_csr_t::tdata1_csr_t(processor_t* const proc, const reg_t addr):
+  csr_t(proc, addr) {
+}
+
+reg_t tdata1_csr_t::read() const noexcept {
+  return proc->TM.tdata1_read(proc, state->tselect->read());
+}
+
+bool tdata1_csr_t::unlogged_write(const reg_t val) noexcept {
+  return proc->TM.tdata1_write(proc, state->tselect->read(), val);
+}
+
+
+tdata2_csr_t::tdata2_csr_t(processor_t* const proc, const reg_t addr):
+  csr_t(proc, addr) {
+}
+
+reg_t tdata2_csr_t::read() const noexcept {
+  return proc->TM.tdata2_read(proc, state->tselect->read());
+}
+
+bool tdata2_csr_t::unlogged_write(const reg_t val) noexcept {
+  return proc->TM.tdata2_write(proc, state->tselect->read(), val);
+}
+
+
+debug_mode_csr_t::debug_mode_csr_t(processor_t* const proc, const reg_t addr):
+  basic_csr_t(proc, addr, 0) {
+}
+
+void debug_mode_csr_t::verify_permissions(insn_t insn, bool write) const {
+  basic_csr_t::verify_permissions(insn, write);
+  if (!state->debug_mode)
+    throw trap_illegal_instruction(insn.bits());
+}
+
+dpc_csr_t::dpc_csr_t(processor_t* const proc, const reg_t addr):
+  epc_csr_t(proc, addr) {
+}
+
+void dpc_csr_t::verify_permissions(insn_t insn, bool write) const {
+  epc_csr_t::verify_permissions(insn, write);
+  if (!state->debug_mode)
+    throw trap_illegal_instruction(insn.bits());
+}
+
+
+dcsr_csr_t::dcsr_csr_t(processor_t* const proc, const reg_t addr):
+  csr_t(proc, addr),
+  prv(0),
+  step(false),
+  ebreakm(false),
+  ebreakh(false),
+  ebreaks(false),
+  ebreaku(false),
+  halt(false),
+  cause(0) {
+}
+
+void dcsr_csr_t::verify_permissions(insn_t insn, bool write) const {
+  csr_t::verify_permissions(insn, write);
+  if (!state->debug_mode)
+    throw trap_illegal_instruction(insn.bits());
+}
+
+reg_t dcsr_csr_t::read() const noexcept {
+  uint32_t v = 0;
+  v = set_field(v, DCSR_XDEBUGVER, 1);
+  v = set_field(v, DCSR_EBREAKM, ebreakm);
+  v = set_field(v, DCSR_EBREAKH, ebreakh);
+  v = set_field(v, DCSR_EBREAKS, ebreaks);
+  v = set_field(v, DCSR_EBREAKU, ebreaku);
+  v = set_field(v, DCSR_STOPCYCLE, 0);
+  v = set_field(v, DCSR_STOPTIME, 0);
+  v = set_field(v, DCSR_CAUSE, cause);
+  v = set_field(v, DCSR_STEP, step);
+  v = set_field(v, DCSR_PRV, prv);
+  return v;
+}
+
+bool dcsr_csr_t::unlogged_write(const reg_t val) noexcept {
+  prv = get_field(val, DCSR_PRV);
+  step = get_field(val, DCSR_STEP);
+  // TODO: ndreset and fullreset
+  ebreakm = get_field(val, DCSR_EBREAKM);
+  ebreakh = get_field(val, DCSR_EBREAKH);
+  ebreaks = get_field(val, DCSR_EBREAKS);
+  ebreaku = get_field(val, DCSR_EBREAKU);
+  halt = get_field(val, DCSR_HALT);
+  return true;
+}
+
+void dcsr_csr_t::write_cause_and_prv(uint8_t cause, reg_t prv) noexcept {
this->cause = cause; + this->prv = prv; + log_write(); +} + + +float_csr_t::float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): + masked_csr_t(proc, addr, mask, init) { +} + +void float_csr_t::verify_permissions(insn_t insn, bool write) const { + masked_csr_t::verify_permissions(insn, write); + require_fp; + if (!proc->extension_enabled('F')) + throw trap_illegal_instruction(insn.bits()); +} + +bool float_csr_t::unlogged_write(const reg_t val) noexcept { + dirty_fp_state; + return masked_csr_t::unlogged_write(val); +} + + +composite_csr_t::composite_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb): + csr_t(proc, addr), + upper_csr(upper_csr), + lower_csr(lower_csr), + upper_lsb(upper_lsb) { +} + +void composite_csr_t::verify_permissions(insn_t insn, bool write) const { + // It is reasonable to assume that either underlying CSR will have + // the same permissions as this composite. + upper_csr->verify_permissions(insn, write); +} + +reg_t composite_csr_t::read() const noexcept { + return (upper_csr->read() << upper_lsb) | lower_csr->read(); +} + +bool composite_csr_t::unlogged_write(const reg_t val) noexcept { + upper_csr->write(val >> upper_lsb); + lower_csr->write(val); + return false; // logging is done only by the underlying CSRs +} + + +seed_csr_t::seed_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +void seed_csr_t::verify_permissions(insn_t insn, bool write) const { + /* Read-only access disallowed due to wipe-on-read side effect */ + /* XXX mseccfg.sseed and mseccfg.useed should be verified. */ + if (!proc->extension_enabled(EXT_ZKR) || !write) + throw trap_illegal_instruction(insn.bits()); + csr_t::verify_permissions(insn, write); +} + +reg_t seed_csr_t::read() const noexcept { + return proc->es.get_seed(); +} + +bool seed_csr_t::unlogged_write(const reg_t val) noexcept { + proc->es.set_seed(val); + return true; +} + + + +vector_csr_t::vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): + basic_csr_t(proc, addr, init), + mask(mask) { +} + +void vector_csr_t::verify_permissions(insn_t insn, bool write) const { + require_vector_vs; + if (!proc->extension_enabled('V')) + throw trap_illegal_instruction(insn.bits()); + basic_csr_t::verify_permissions(insn, write); +} + +void vector_csr_t::write_raw(const reg_t val) noexcept { + const bool success = basic_csr_t::unlogged_write(val); + if (success) + log_write(); +} + +bool vector_csr_t::unlogged_write(const reg_t val) noexcept { + if (mask == 0) return false; + dirty_vs_state; + return basic_csr_t::unlogged_write(val & mask); +} + + +vxsat_csr_t::vxsat_csr_t(processor_t* const proc, const reg_t addr): + masked_csr_t(proc, addr, /*mask*/ 1, /*init*/ 0) { +} + +void vxsat_csr_t::verify_permissions(insn_t insn, bool write) const { + require_vector_vs; + if (!proc->extension_enabled('V') && !proc->extension_enabled(EXT_ZPN)) + throw trap_illegal_instruction(insn.bits()); + masked_csr_t::verify_permissions(insn, write); +} + +bool vxsat_csr_t::unlogged_write(const reg_t val) noexcept { + dirty_vs_state; + return masked_csr_t::unlogged_write(val); +} diff --git a/vendor/riscv-isa-sim/riscv/csrs.h b/vendor/riscv-isa-sim/riscv/csrs.h new file mode 100644 index 00000000..ed039555 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/csrs.h @@ -0,0 +1,701 @@ +// See LICENSE for license details. 
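+//
+// Illustrative sketch (hypothetical, not one of the classes declared in
+// this header): a concrete CSR plugs into the csr_t interface below by
+// overriding read() for the architectural value and unlogged_write() for
+// the update, e.g. a one-bit CSR might look like
+//
+//   class my_bit_csr_t: public csr_t {
+//    public:
+//     my_bit_csr_t(processor_t* const proc, const reg_t addr):
+//       csr_t(proc, addr), bit(0) {}
+//     virtual reg_t read() const noexcept override { return bit; }
+//    protected:
+//     virtual bool unlogged_write(const reg_t val) noexcept override {
+//       bit = val & 1;  // all other bits hardwired to zero
+//       return true;    // true -> csr_t::write() records it in the commit log
+//     }
+//    private:
+//     reg_t bit;
+//   };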
+#ifndef _RISCV_CSRS_H
+#define _RISCV_CSRS_H
+
+// For reg_t:
+#include "decode.h"
+// For std::shared_ptr
+#include <memory>
+// For access_type:
+#include "memtracer.h"
+#include <cassert>
+
+class processor_t;
+struct state_t;
+
+// Parent, abstract class for all CSRs
+class csr_t {
+ public:
+  csr_t(processor_t* const proc, const reg_t addr);
+
+  // Throw exception if read/write disallowed.
+  virtual void verify_permissions(insn_t insn, bool write) const;
+
+  // read() returns the architectural value of this CSR. No permission
+  // checking needed or allowed. Side effects not allowed.
+  virtual reg_t read() const noexcept = 0;
+
+  // write() updates the architectural value of this CSR. No
+  // permission checking needed or allowed.
+  // Child classes must implement unlogged_write()
+  void write(const reg_t val) noexcept;
+
+  virtual ~csr_t();
+
+ protected:
+  // Return value indicates success; false means no write actually occurred
+  virtual bool unlogged_write(const reg_t val) noexcept = 0;
+
+  // Record this CSR update (which has already happened) in the commit log
+  void log_write() const noexcept;
+
+  // Record a write to an alternate CSR (e.g. minstreth instead of minstret)
+  void log_special_write(const reg_t address, const reg_t val) const noexcept;
+
+  // What value was written to this reg? Default implementation simply
+  // calls read(), but a few CSRs are special.
+  virtual reg_t written_value() const noexcept;
+
+  processor_t* const proc;
+  state_t* const state;
+ public:
+  const reg_t address;
+ private:
+  const unsigned csr_priv;
+  const bool csr_read_only;
+};
+
+typedef std::shared_ptr<csr_t> csr_t_p;
+
+
+// Basic CSRs, with XLEN bits fully readable and writable.
+class basic_csr_t: public csr_t {
+ public:
+  basic_csr_t(processor_t* const proc, const reg_t addr, const reg_t init);
+
+  virtual reg_t read() const noexcept override {
+    return val;
+  }
+
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t val;
+};
+
+
+class pmpaddr_csr_t: public csr_t {
+ public:
+  pmpaddr_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+  virtual reg_t read() const noexcept override;
+
+  // Does a 4-byte access at the specified address match this PMP entry?
+  bool match4(reg_t addr) const noexcept;
+
+  // Does the specified range match only a proper subset of this page?
+  bool subset_match(reg_t addr, reg_t len) const noexcept;
+
+  // Is the specified access allowed given the pmpcfg privileges?
+  bool access_ok(access_type type, reg_t mode) const noexcept;
+
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  // Assuming this is configured as TOR, return address for top of
+  // range. Also forms bottom-of-range for next-highest pmpaddr
+  // register if that one is TOR.
+  reg_t tor_paddr() const noexcept;
+
+  // Assuming this is configured as TOR, return address for bottom of
+  // range. This is tor_paddr() from the previous pmpaddr register.
+  reg_t tor_base_paddr() const noexcept;
+
+  // Assuming this is configured as NAPOT or NA4, return mask for paddr.
+  // E.g. for 4KiB region, returns 0xffffffff_fffff000.
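+  // (Illustrative encoding for that case -- an example, not a constraint
+  // from this header: a 4KiB NAPOT region based at 0x80000000 would be
+  // programmed as
+  //   pmpaddr = (0x80000000 >> 2) | 0x1ff;  // nine trailing 1s -> 2^12 bytes
+  // per the standard PMP NAPOT encoding.)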
+  reg_t napot_mask() const noexcept;
+
+  bool next_locked_and_tor() const noexcept;
+  reg_t val;
+  friend class pmpcfg_csr_t;  // so he can access cfg
+  uint8_t cfg;
+  const size_t pmpidx;
+};
+
+typedef std::shared_ptr<pmpaddr_csr_t> pmpaddr_csr_t_p;
+
+class pmpcfg_csr_t: public csr_t {
+ public:
+  pmpcfg_csr_t(processor_t* const proc, const reg_t addr);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+class mseccfg_csr_t: public csr_t {
+ public:
+  mseccfg_csr_t(processor_t* const proc, const reg_t addr);
+  virtual reg_t read() const noexcept override;
+  bool get_mml() const noexcept;
+  bool get_mmwp() const noexcept;
+  bool get_rlb() const noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t mseccfg_val;
+  reg_t pmplock_recorded;
+  friend class pmpcfg_csr_t; //pmpcfg needs to access pmplock_recorded
+  //friend class pmpaddr_csr_t;
+};
+
+typedef std::shared_ptr<mseccfg_csr_t> mseccfg_csr_t_p;
+
+// For CSRs that have a virtualized copy under another name. Each
+// instance of virtualized_csr_t will read/write one of two CSRs,
+// based on state.v. E.g. sscratch, stval, etc.
+//
+// Example: sscratch and vsscratch are both instances of basic_csr_t.
+// The csrmap will contain a virtualized_csr_t under sscratch's
+// address, plus the vsscratch basic_csr_t under its address.
+
+class virtualized_csr_t: public csr_t {
+ public:
+  virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt);
+
+  virtual reg_t read() const noexcept override;
+  // Instead of using state.v, explicitly request original or virtual:
+  reg_t readvirt(bool virt) const noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+  csr_t_p orig_csr;
+  csr_t_p virt_csr;
+};
+
+typedef std::shared_ptr<virtualized_csr_t> virtualized_csr_t_p;
+
+// For mepc, sepc, and vsepc
+class epc_csr_t: public csr_t {
+ public:
+  epc_csr_t(processor_t* const proc, const reg_t addr);
+
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t val;
+};
+
+
+// For mtvec, stvec, and vstvec
+class tvec_csr_t: public csr_t {
+ public:
+  tvec_csr_t(processor_t* const proc, const reg_t addr);
+
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t val;
+};
+
+
+// For mcause, scause, and vscause
+class cause_csr_t: public basic_csr_t {
+ public:
+  cause_csr_t(processor_t* const proc, const reg_t addr);
+
+  virtual reg_t read() const noexcept override;
+};
+
+
+// For *status family of CSRs
+class base_status_csr_t: public csr_t {
+ public:
+  base_status_csr_t(processor_t* const proc, const reg_t addr);
+
+  bool field_exists(const reg_t which) {
+    return (sstatus_write_mask & which) != 0;
+  }
+
+ protected:
+  reg_t adjust_sd(const reg_t val) const noexcept;
+  void maybe_flush_tlb(const reg_t newval) noexcept;
+  const bool has_page;
+  const reg_t sstatus_write_mask;
+  const reg_t sstatus_read_mask;
+ private:
+  reg_t compute_sstatus_write_mask() const noexcept;
+};
+
+typedef std::shared_ptr<base_status_csr_t> base_status_csr_t_p;
+
+
+// For vsstatus, which is its own separate architectural register
+// (unlike sstatus)
+class vsstatus_csr_t final: public base_status_csr_t {
+ public:
+  vsstatus_csr_t(processor_t* const proc, const reg_t addr);
+
+  reg_t read() const noexcept override {
+    return val;
+  }
+
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t val;
+};
+
+typedef std::shared_ptr<vsstatus_csr_t> vsstatus_csr_t_p;
+
+
+class mstatus_csr_t final: public base_status_csr_t {
+ public:
+  mstatus_csr_t(processor_t* const proc, const reg_t addr);
+
+  reg_t read() const noexcept override {
+    return val;
+  }
+
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t val;
+  friend class mstatush_csr_t;
+};
+
+typedef std::shared_ptr<mstatus_csr_t> mstatus_csr_t_p;
+
+
+class mstatush_csr_t: public csr_t {
+ public:
+  mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  mstatus_csr_t_p mstatus;
+  const reg_t mask;
+};
+
+
+class sstatus_proxy_csr_t final: public base_status_csr_t {
+ public:
+  sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus);
+
+  reg_t read() const noexcept override {
+    return mstatus->read() & sstatus_read_mask;
+  }
+
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  mstatus_csr_t_p mstatus;
+};
+
+typedef std::shared_ptr<sstatus_proxy_csr_t> sstatus_proxy_csr_t_p;
+
+class sstatus_csr_t: public virtualized_csr_t {
+ public:
+  sstatus_csr_t(processor_t* const proc, sstatus_proxy_csr_t_p orig, vsstatus_csr_t_p virt);
+
+  // Set FS, VS, or XS bits to dirty
+  void dirty(const reg_t dirties);
+  // Return true if the specified bits are not 00 (Off)
+  bool enabled(const reg_t which);
+ private:
+  sstatus_proxy_csr_t_p orig_sstatus;
+  vsstatus_csr_t_p virt_sstatus;
+};
+
+typedef std::shared_ptr<sstatus_csr_t> sstatus_csr_t_p;
+
+
+class misa_csr_t final: public basic_csr_t {
+ public:
+  misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa);
+
+  bool extension_enabled(unsigned char ext) const noexcept {
+    assert(ext >= 'A' && ext <= 'Z');
+    return (read() >> (ext - 'A')) & 1;
+  }
+
+  bool extension_enabled_const(unsigned char ext) const noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  const reg_t max_isa;
+  const reg_t write_mask;
+  const reg_t dependency(const reg_t val, const char feature, const char depends_on) const noexcept;
+};
+
+typedef std::shared_ptr<misa_csr_t> misa_csr_t_p;
+
+
+class mip_or_mie_csr_t: public csr_t {
+ public:
+  mip_or_mie_csr_t(processor_t* const proc, const reg_t addr);
+  virtual reg_t read() const noexcept override final;
+
+  void write_with_mask(const reg_t mask, const reg_t val) noexcept;
+
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override final;
+  reg_t val;
+ private:
+  virtual reg_t write_mask() const noexcept = 0;
+};
+
+
+// mip is special because some of the bits are driven by hardware pins
+class mip_csr_t: public mip_or_mie_csr_t {
+ public:
+  mip_csr_t(processor_t* const proc, const reg_t addr);
+
+  // Does not log. Used by external things (clint) that wiggle bits in mip.
+  void backdoor_write_with_mask(const reg_t mask, const reg_t val) noexcept;
+ private:
+  virtual reg_t write_mask() const noexcept override;
+};
+
+typedef std::shared_ptr<mip_csr_t> mip_csr_t_p;
+
+
+class mie_csr_t: public mip_or_mie_csr_t {
+ public:
+  mie_csr_t(processor_t* const proc, const reg_t addr);
+ private:
+  virtual reg_t write_mask() const noexcept override;
+};
+
+typedef std::shared_ptr<mie_csr_t> mie_csr_t_p;
+
+
+// For sip, hip, hvip, vsip, sie, hie, vsie which are all just (masked
+// & shifted) views into mip or mie. Each pair will have one of these
+// objects describing the view, e.g. one for sip+sie, one for hip+hie,
+// etc.
+class generic_int_accessor_t {
+ public:
+  enum mask_mode_t { NONE, MIDELEG, HIDELEG };
+
+  generic_int_accessor_t(state_t* const state,
+                         const reg_t read_mask,
+                         const reg_t ip_write_mask,
+                         const reg_t ie_write_mask,
+                         const mask_mode_t mask_mode,
+                         const int shiftamt);
+  reg_t ip_read() const noexcept;
+  void ip_write(const reg_t val) noexcept;
+  reg_t ie_read() const noexcept;
+  void ie_write(const reg_t val) noexcept;
+ private:
+  state_t* const state;
+  const reg_t read_mask;
+  const reg_t ip_write_mask;
+  const reg_t ie_write_mask;
+  const bool mask_mideleg;
+  const bool mask_hideleg;
+  const int shiftamt;
+  reg_t deleg_mask() const;
+};
+
+typedef std::shared_ptr<generic_int_accessor_t> generic_int_accessor_t_p;
+
+
+// For all CSRs that are simply (masked & shifted) views into mip
+class mip_proxy_csr_t: public csr_t {
+ public:
+  mip_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  generic_int_accessor_t_p accr;
+};
+
+// For all CSRs that are simply (masked & shifted) views into mie
+class mie_proxy_csr_t: public csr_t {
+ public:
+  mie_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  generic_int_accessor_t_p accr;
+};
+
+
+
+class mideleg_csr_t: public basic_csr_t {
+ public:
+  mideleg_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+
+class medeleg_csr_t: public basic_csr_t {
+ public:
+  medeleg_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  const reg_t hypervisor_exceptions;
+};
+
+
+// For CSRs with certain bits hardwired
+class masked_csr_t: public basic_csr_t {
+ public:
+  masked_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init);
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  const reg_t mask;
+};
+
+
+// For satp and vsatp
+// These are three classes in order to handle the [V]TVM bits permission checks
+class base_atp_csr_t: public basic_csr_t {
+ public:
+  base_atp_csr_t(processor_t* const proc, const reg_t addr);
+  bool satp_valid(reg_t val) const noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t compute_new_satp(reg_t val) const noexcept;
+};
+
+class satp_csr_t: public base_atp_csr_t {
+ public:
+  satp_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+};
+
+typedef std::shared_ptr<satp_csr_t> satp_csr_t_p;
+
+class virtualized_satp_csr_t: public virtualized_csr_t {
+ public:
+  virtualized_satp_csr_t(processor_t* const proc, satp_csr_t_p orig, csr_t_p virt);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  satp_csr_t_p orig_satp;
+};
+
+
+// For minstret and mcycle, which are always 64 bits, but in RV32 are
+// split into high and low halves. The first class always holds the
+// full 64-bit value.
+class wide_counter_csr_t: public csr_t {
+ public:
+  wide_counter_csr_t(processor_t* const proc, const reg_t addr);
+  // Always returns full 64-bit value
+  virtual reg_t read() const noexcept override;
+  void bump(const reg_t howmuch) noexcept;
+  void write_upper_half(const reg_t val) noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+  virtual reg_t written_value() const noexcept override;
+ private:
+  reg_t val;
+};
+
+typedef std::shared_ptr<wide_counter_csr_t> wide_counter_csr_t_p;
+
+
+// A simple proxy to read/write the upper half of minstret/mcycle
+class counter_top_csr_t: public csr_t {
+ public:
+  counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  wide_counter_csr_t_p parent;
+};
+
+typedef std::shared_ptr<counter_top_csr_t> counter_top_csr_t_p;
+
+
+// For a CSR that is an alias of another
+class proxy_csr_t: public csr_t {
+ public:
+  proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate);
+  virtual reg_t read() const noexcept override;
+ protected:
+  bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  csr_t_p delegate;
+};
+
+
+// For a CSR with a fixed, unchanging value
+class const_csr_t: public csr_t {
+ public:
+  const_csr_t(processor_t* const proc, const reg_t addr, reg_t val);
+  virtual reg_t read() const noexcept override;
+ protected:
+  bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  const reg_t val;
+};
+
+
+// For a CSR that is an unprivileged accessor of a privileged counter
+class counter_proxy_csr_t: public proxy_csr_t {
+ public:
+  counter_proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+ private:
+  bool myenable(csr_t_p counteren) const noexcept;
+};
+
+
+// For machine-level CSRs that only exist with Hypervisor
+class hypervisor_csr_t: public basic_csr_t {
+ public:
+  hypervisor_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+};
+
+
+class hideleg_csr_t: public masked_csr_t {
+ public:
+  hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg);
+  virtual reg_t read() const noexcept override;
+ private:
+  csr_t_p mideleg;
+};
+
+
+class hgatp_csr_t: public basic_csr_t {
+ public:
+  hgatp_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+
+class tselect_csr_t: public basic_csr_t {
+ public:
+  tselect_csr_t(processor_t* const proc, const reg_t addr);
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+
+class tdata1_csr_t: public csr_t {
+ public:
+  tdata1_csr_t(processor_t* const proc, const reg_t addr);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+class tdata2_csr_t: public csr_t {
+ public:
+  tdata2_csr_t(processor_t* const proc, const reg_t addr);
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+// For CSRs that are only writable from debug mode
+class debug_mode_csr_t: public basic_csr_t {
+ public:
+  debug_mode_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+};
+
+typedef std::shared_ptr<tdata2_csr_t> tdata2_csr_t_p;
+
+
+class dpc_csr_t: public epc_csr_t {
+ public:
+  dpc_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+};
+
+class dcsr_csr_t: public csr_t {
+ public:
+  dcsr_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+  virtual reg_t read() const noexcept override;
+  void write_cause_and_prv(uint8_t cause, reg_t prv) noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ public:
+  uint8_t prv;
+  bool step;
+  bool ebreakm;
+  bool ebreakh;
+  bool ebreaks;
+  bool ebreaku;
+  bool halt;
+  uint8_t cause;
+};
+
+typedef std::shared_ptr<dcsr_csr_t> dcsr_csr_t_p;
+
+
+class float_csr_t final: public masked_csr_t {
+ public:
+  float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+typedef std::shared_ptr<float_csr_t> float_csr_t_p;
+
+
+// For a CSR like FCSR, that is actually a view into multiple
+// underlying registers.
+class composite_csr_t: public csr_t {
+ public:
+  // We assume the lower_csr maps to bit 0.
+  composite_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  csr_t_p upper_csr;
+  csr_t_p lower_csr;
+  const unsigned upper_lsb;
+};
+
+
+class seed_csr_t: public csr_t {
+ public:
+  seed_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+  virtual reg_t read() const noexcept override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+
+class vector_csr_t: public basic_csr_t {
+ public:
+  vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init=0);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+  // Write without regard to mask, and without touching mstatus.VS
+  void write_raw(const reg_t val) noexcept;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+  reg_t mask;
+};
+
+typedef std::shared_ptr<vector_csr_t> vector_csr_t_p;
+
+
+// For CSRs shared between Vector and P extensions (vxsat)
+class vxsat_csr_t: public masked_csr_t {
+ public:
+  vxsat_csr_t(processor_t* const proc, const reg_t addr);
+  virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+  virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/debug_defines.h b/vendor/riscv-isa-sim/riscv/debug_defines.h
new file mode 100644
index 00000000..9ce54b72
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/debug_defines.h
@@ -0,0 +1,2538 @@
+/*
+ * This file is auto-generated by running 'make debug_defines.h' in
+ * https://github.com/riscv/riscv-debug-spec/ (4ce69ad)
+ * License: Creative Commons Attribution 4.0 International Public License (CC BY 4.0)
+ */
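+
+/*
+ * Usage sketch (illustrative only; `idcode` and `new_version` are
+ * hypothetical locals, not names from this header): each field below is
+ * described by an _OFFSET (bit position), a _LENGTH (width in bits), and a
+ * mask macro covering the field, so extraction and insertion follow one
+ * pattern:
+ *
+ *   unsigned version = (idcode & DTM_IDCODE_VERSION) >> DTM_IDCODE_VERSION_OFFSET;
+ *   idcode = (idcode & ~DTM_IDCODE_VERSION) |
+ *            ((new_version << DTM_IDCODE_VERSION_OFFSET) & DTM_IDCODE_VERSION);
+ */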
+#define DTM_IDCODE 0x01 +/* + * Identifies the release version of this part. + */ +#define DTM_IDCODE_VERSION_OFFSET 28 +#define DTM_IDCODE_VERSION_LENGTH 4 +#define DTM_IDCODE_VERSION (0xfU << DTM_IDCODE_VERSION_OFFSET) +/* + * Identifies the designer's part number of this part. + */ +#define DTM_IDCODE_PARTNUMBER_OFFSET 12 +#define DTM_IDCODE_PARTNUMBER_LENGTH 16 +#define DTM_IDCODE_PARTNUMBER (0xffffU << DTM_IDCODE_PARTNUMBER_OFFSET) +/* + * Identifies the designer/manufacturer of this part. Bits 6:0 must be + * bits 6:0 of the designer/manufacturer's Identification Code as + * assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16 + * count of the number of continuation characters (0x7f) in that same + * Identification Code. + */ +#define DTM_IDCODE_MANUFID_OFFSET 1 +#define DTM_IDCODE_MANUFID_LENGTH 11 +#define DTM_IDCODE_MANUFID (0x7ffU << DTM_IDCODE_MANUFID_OFFSET) +#define DTM_IDCODE_1_OFFSET 0 +#define DTM_IDCODE_1_LENGTH 1 +#define DTM_IDCODE_1 (0x1U << DTM_IDCODE_1_OFFSET) +#define DTM_DTMCS 0x10 +/* + * Writing 1 to this bit does a hard reset of the DTM, + * causing the DTM to forget about any outstanding DMI transactions, and + * returning all registers and internal state to their reset value. + * In general this should only be used when the Debugger has + * reason to expect that the outstanding DMI transaction will never + * complete (e.g. a reset condition caused an inflight DMI transaction to + * be cancelled). + */ +#define DTM_DTMCS_DMIHARDRESET_OFFSET 17 +#define DTM_DTMCS_DMIHARDRESET_LENGTH 1 +#define DTM_DTMCS_DMIHARDRESET (0x1U << DTM_DTMCS_DMIHARDRESET_OFFSET) +/* + * Writing 1 to this bit clears the sticky error state, but does + * not affect outstanding DMI transactions. + */ +#define DTM_DTMCS_DMIRESET_OFFSET 16 +#define DTM_DTMCS_DMIRESET_LENGTH 1 +#define DTM_DTMCS_DMIRESET (0x1U << DTM_DTMCS_DMIRESET_OFFSET) +/* + * This is a hint to the debugger of the minimum number of + * cycles a debugger should spend in + * Run-Test/Idle after every DMI scan to avoid a `busy' + * return code (\FdtmDtmcsDmistat of 3). A debugger must still + * check \FdtmDtmcsDmistat when necessary. + * + * 0: It is not necessary to enter Run-Test/Idle at all. + * + * 1: Enter Run-Test/Idle and leave it immediately. + * + * 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving. + * + * And so on. + */ +#define DTM_DTMCS_IDLE_OFFSET 12 +#define DTM_DTMCS_IDLE_LENGTH 3 +#define DTM_DTMCS_IDLE (0x7U << DTM_DTMCS_IDLE_OFFSET) +/* + * 0: No error. + * + * 1: Reserved. Interpret the same as 2. + * + * 2: An operation failed (resulted in \FdtmDmiOp of 2). + * + * 3: An operation was attempted while a DMI access was still in + * progress (resulted in \FdtmDmiOp of 3). + */ +#define DTM_DTMCS_DMISTAT_OFFSET 10 +#define DTM_DTMCS_DMISTAT_LENGTH 2 +#define DTM_DTMCS_DMISTAT (0x3U << DTM_DTMCS_DMISTAT_OFFSET) +/* + * The size of \FdmSbaddressZeroAddress in \RdtmDmi. + */ +#define DTM_DTMCS_ABITS_OFFSET 4 +#define DTM_DTMCS_ABITS_LENGTH 6 +#define DTM_DTMCS_ABITS (0x3fU << DTM_DTMCS_ABITS_OFFSET) +/* + * 0: Version described in spec version 0.11. + * + * 1: Version described in spec versions 0.13 and 1.0. + * + * 15: Version not described in any available version of this spec. + */ +#define DTM_DTMCS_VERSION_OFFSET 0 +#define DTM_DTMCS_VERSION_LENGTH 4 +#define DTM_DTMCS_VERSION (0xfU << DTM_DTMCS_VERSION_OFFSET) +#define DTM_DMI 0x11 +/* + * Address used for DMI access. In Update-DR this value is used + * to access the DM over the DMI. 
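+ *
+ * For example, with abits = 7 this field occupies bits 40:34, and the whole
+ * register is then 2 + 32 + 7 = 41 bits wide (op, data, address).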
+ */ +#define DTM_DMI_ADDRESS_OFFSET 34 +#define DTM_DMI_ADDRESS_LENGTH abits +#define DTM_DMI_ADDRESS (((1L << abits) - 1) << DTM_DMI_ADDRESS_OFFSET) +/* + * The data to send to the DM over the DMI during Update-DR, and + * the data returned from the DM as a result of the previous operation. + */ +#define DTM_DMI_DATA_OFFSET 2 +#define DTM_DMI_DATA_LENGTH 32 +#define DTM_DMI_DATA (0xffffffffULL << DTM_DMI_DATA_OFFSET) +/* + * When the debugger writes this field, it has the following meaning: + * + * 0: Ignore \FdmSbdataZeroData and \FdmSbaddressZeroAddress. (nop) + * + * Don't send anything over the DMI during Update-DR. + * This operation should never result in a busy or error response. + * The address and data reported in the following Capture-DR + * are undefined. + * + * 1: Read from \FdmSbaddressZeroAddress. (read) + * + * 2: Write \FdmSbdataZeroData to \FdmSbaddressZeroAddress. (write) + * + * 3: Reserved. + * + * When the debugger reads this field, it means the following: + * + * 0: The previous operation completed successfully. + * + * 1: Reserved. + * + * 2: A previous operation failed. The data scanned into \RdtmDmi in + * this access will be ignored. This status is sticky and can be + * cleared by writing \FdtmDtmcsDmireset in \RdtmDtmcs. + * + * This indicates that the DM itself responded with an error. + * There are no specified cases in which the DM would + * respond with an error, and DMI is not required to support + * returning errors. + * + * 3: An operation was attempted while a DMI request is still in + * progress. The data scanned into \RdtmDmi in this access will be + * ignored. This status is sticky and can be cleared by writing + * \FdtmDtmcsDmireset in \RdtmDtmcs. If a debugger sees this status, it + * needs to give the target more TCK edges between Update-DR and + * Capture-DR. The simplest way to do that is to add extra transitions + * in Run-Test/Idle. + */ +#define DTM_DMI_OP_OFFSET 0 +#define DTM_DMI_OP_LENGTH 2 +#define DTM_DMI_OP (0x3ULL << DTM_DMI_OP_OFFSET) +#define CSR_DCSR 0x7b0 +/* + * 0: There is no debug support. + * + * 4: Debug support exists as it is described in this document. + * + * 15: There is debug support, but it does not conform to any + * available version of this spec. + */ +#define CSR_DCSR_DEBUGVER_OFFSET 28 +#define CSR_DCSR_DEBUGVER_LENGTH 4 +#define CSR_DCSR_DEBUGVER (0xfU << CSR_DCSR_DEBUGVER_OFFSET) +/* + * 0: {\tt ebreak} instructions in VS-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in VS-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support virtualization mode. + */ +#define CSR_DCSR_EBREAKVS_OFFSET 17 +#define CSR_DCSR_EBREAKVS_LENGTH 1 +#define CSR_DCSR_EBREAKVS (0x1U << CSR_DCSR_EBREAKVS_OFFSET) +/* + * 0: {\tt ebreak} instructions in VU-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in VU-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support virtualization mode. + */ +#define CSR_DCSR_EBREAKVU_OFFSET 16 +#define CSR_DCSR_EBREAKVU_LENGTH 1 +#define CSR_DCSR_EBREAKVU (0x1U << CSR_DCSR_EBREAKVU_OFFSET) +/* + * 0: {\tt ebreak} instructions in M-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in M-mode enter Debug Mode. 
+ */ +#define CSR_DCSR_EBREAKM_OFFSET 15 +#define CSR_DCSR_EBREAKM_LENGTH 1 +#define CSR_DCSR_EBREAKM (0x1U << CSR_DCSR_EBREAKM_OFFSET) +/* + * 0: {\tt ebreak} instructions in S-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in S-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support S-mode. + */ +#define CSR_DCSR_EBREAKS_OFFSET 13 +#define CSR_DCSR_EBREAKS_LENGTH 1 +#define CSR_DCSR_EBREAKS (0x1U << CSR_DCSR_EBREAKS_OFFSET) +/* + * 0: {\tt ebreak} instructions in U-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in U-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support U-mode. + */ +#define CSR_DCSR_EBREAKU_OFFSET 12 +#define CSR_DCSR_EBREAKU_LENGTH 1 +#define CSR_DCSR_EBREAKU (0x1U << CSR_DCSR_EBREAKU_OFFSET) +/* + * 0: Interrupts (including NMI) are disabled during single stepping. + * + * 1: Interrupts (including NMI) are enabled during single stepping. + * + * Implementations may hard wire this bit to 0. + * In that case interrupt behavior can be emulated by the debugger. + * + * The debugger must not change the value of this bit while the hart + * is running. + */ +#define CSR_DCSR_STEPIE_OFFSET 11 +#define CSR_DCSR_STEPIE_LENGTH 1 +#define CSR_DCSR_STEPIE (0x1U << CSR_DCSR_STEPIE_OFFSET) +/* + * 0: Increment counters as usual. + * + * 1: Don't increment any hart-local counters while in Debug Mode or + * on {\tt ebreak} instructions that cause entry into Debug Mode. + * These counters include the {\tt instret} CSR. On single-hart cores + * {\tt cycle} should be stopped, but on multi-hart cores it must keep + * incrementing. + * + * An implementation may hardwire this bit to 0 or 1. + */ +#define CSR_DCSR_STOPCOUNT_OFFSET 10 +#define CSR_DCSR_STOPCOUNT_LENGTH 1 +#define CSR_DCSR_STOPCOUNT (0x1U << CSR_DCSR_STOPCOUNT_OFFSET) +/* + * 0: Increment \Rtime as usual. + * + * 1: Don't increment \Rtime while in Debug Mode. If all harts + * have \FcsrDcsrStoptime=1 and are in Debug Mode then \Rmtime + * is also allowed to stop incrementing. + * + * An implementation may hardwire this bit to 0 or 1. + */ +#define CSR_DCSR_STOPTIME_OFFSET 9 +#define CSR_DCSR_STOPTIME_LENGTH 1 +#define CSR_DCSR_STOPTIME (0x1U << CSR_DCSR_STOPTIME_OFFSET) +/* + * Explains why Debug Mode was entered. + * + * When there are multiple reasons to enter Debug Mode in a single + * cycle, hardware should set \FcsrDcsrCause to the cause with the highest + * priority. + * + * 1: An {\tt ebreak} instruction was executed. (priority 3) + * + * 2: A Trigger Module trigger fired with action=1. (priority 4) + * + * 3: The debugger requested entry to Debug Mode using \FdmDmcontrolHaltreq. + * (priority 1) + * + * 4: The hart single stepped because \FcsrDcsrStep was set. (priority 0, lowest) + * + * 5: The hart halted directly out of reset due to \Fresethaltreq. It + * is also acceptable to report 3 when this happens. (priority 2) + * + * 6: The hart halted because it's part of a halt group. (priority 5, + * highest) Harts may report 3 for this cause instead. + * + * Other values are reserved for future use. + */ +#define CSR_DCSR_CAUSE_OFFSET 6 +#define CSR_DCSR_CAUSE_LENGTH 3 +#define CSR_DCSR_CAUSE (0x7U << CSR_DCSR_CAUSE_OFFSET) +/* + * Extends the prv field with the virtualization mode the hart was operating + * in when Debug Mode was entered. The encoding is described in Table + * \ref{tab:privmode}. 
+ * A debugger can change this value to change the hart's virtualization mode + * when exiting Debug Mode. + * This bit is hardwired to 0 on harts that do not support virtualization mode. + */ +#define CSR_DCSR_V_OFFSET 5 +#define CSR_DCSR_V_LENGTH 1 +#define CSR_DCSR_V (0x1U << CSR_DCSR_V_OFFSET) +/* + * 0: \FcsrMstatusMprv in \Rmstatus is ignored in Debug Mode. + * + * 1: \FcsrMstatusMprv in \Rmstatus takes effect in Debug Mode. + * + * Implementing this bit is optional. It may be tied to either 0 or 1. + */ +#define CSR_DCSR_MPRVEN_OFFSET 4 +#define CSR_DCSR_MPRVEN_LENGTH 1 +#define CSR_DCSR_MPRVEN (0x1U << CSR_DCSR_MPRVEN_OFFSET) +/* + * When set, there is a Non-Maskable-Interrupt (NMI) pending for the hart. + * + * Since an NMI can indicate a hardware error condition, + * reliable debugging may no longer be possible once this bit becomes set. + * This is implementation-dependent. + */ +#define CSR_DCSR_NMIP_OFFSET 3 +#define CSR_DCSR_NMIP_LENGTH 1 +#define CSR_DCSR_NMIP (0x1U << CSR_DCSR_NMIP_OFFSET) +/* + * When set and not in Debug Mode, the hart will only execute a single + * instruction and then enter Debug Mode. See Section~\ref{stepBit} + * for details. + * + * The debugger must not change the value of this bit while the hart + * is running. + */ +#define CSR_DCSR_STEP_OFFSET 2 +#define CSR_DCSR_STEP_LENGTH 1 +#define CSR_DCSR_STEP (0x1U << CSR_DCSR_STEP_OFFSET) +/* + * Contains the privilege mode the hart was operating in when Debug + * Mode was entered. The encoding is described in Table + * \ref{tab:privmode}. A debugger can change this value to change + * the hart's privilege mode when exiting Debug Mode. + * + * Not all privilege modes are supported on all harts. If the + * encoding written is not supported or the debugger is not allowed to + * change to it, the hart may change to any supported privilege mode. + */ +#define CSR_DCSR_PRV_OFFSET 0 +#define CSR_DCSR_PRV_LENGTH 2 +#define CSR_DCSR_PRV (0x3U << CSR_DCSR_PRV_OFFSET) +#define CSR_DPC 0x7b1 +#define CSR_DPC_DPC_OFFSET 0 +#define CSR_DPC_DPC_LENGTH DXLEN +#define CSR_DPC_DPC (((1L << DXLEN) - 1) << CSR_DPC_DPC_OFFSET) +#define CSR_DSCRATCH0 0x7b2 +#define CSR_DSCRATCH1 0x7b3 +#define CSR_TSELECT 0x7a0 +#define CSR_TSELECT_INDEX_OFFSET 0 +#define CSR_TSELECT_INDEX_LENGTH XLEN +#define CSR_TSELECT_INDEX (((1L << XLEN) - 1) << CSR_TSELECT_INDEX_OFFSET) +#define CSR_TDATA1 0x7a1 +/* + * 0: There is no trigger at this \RcsrTselect. + * + * 1: The trigger is a legacy SiFive address match trigger. These + * should not be implemented and aren't further documented here. + * + * 2: The trigger is an address/data match trigger. The remaining bits + * in this register act as described in \RcsrMcontrol. + * + * 3: The trigger is an instruction count trigger. The remaining bits + * in this register act as described in \RcsrIcount. + * + * 4: The trigger is an interrupt trigger. The remaining bits + * in this register act as described in \RcsrItrigger. + * + * 5: The trigger is an exception trigger. The remaining bits + * in this register act as described in \RcsrEtrigger. + * + * 6: The trigger is an address/data match trigger. The remaining bits + * in this register act as described in \RcsrMcontrolSix. This is similar + * to a type 2 trigger, but provides additional functionality and + * should be used instead of type 2 in newer implementations. + * + * 7: The trigger is a trigger source external to the TM. The + * remaining bits in this register act as described in \RcsrTmexttrigger. 
+ * + * 12--14: These trigger types are available for non-standard use. + * + * 15: This trigger exists (so enumeration shouldn't terminate), but + * is not currently available. + * + * Other values are reserved for future use. + */ +#define CSR_TDATA1_TYPE_OFFSET (XLEN-4) +#define CSR_TDATA1_TYPE_LENGTH 4 +#define CSR_TDATA1_TYPE (0xfULL << CSR_TDATA1_TYPE_OFFSET) +/* + * If \FcsrTdataOneType is 0, then this bit is hard-wired to 0. + * + * 0: Both Debug and M-mode can write the {\tt tdata} registers at the + * selected \RcsrTselect. + * + * 1: Only Debug Mode can write the {\tt tdata} registers at the + * selected \RcsrTselect. Writes from other modes are ignored. + * + * This bit is only writable from Debug Mode. + * In ordinary use, external debuggers will always set this bit when + * configuring a trigger. + * When clearing this bit, debuggers should also set the action field + * (whose location depends on \FcsrTdataOneType) to something other + * than 1. + */ +#define CSR_TDATA1_DMODE_OFFSET (XLEN-5) +#define CSR_TDATA1_DMODE_LENGTH 1 +#define CSR_TDATA1_DMODE (0x1ULL << CSR_TDATA1_DMODE_OFFSET) +/* + * If \FcsrTdataOneType is 0, then this field is hard-wired to 0. + * + * Trigger-specific data. + */ +#define CSR_TDATA1_DATA_OFFSET 0 +#define CSR_TDATA1_DATA_LENGTH (XLEN - 5) +#define CSR_TDATA1_DATA (((1L << XLEN - 5) - 1) << CSR_TDATA1_DATA_OFFSET) +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA2_DATA_OFFSET 0 +#define CSR_TDATA2_DATA_LENGTH XLEN +#define CSR_TDATA2_DATA (((1L << XLEN) - 1) << CSR_TDATA2_DATA_OFFSET) +#define CSR_TDATA3 0x7a3 +#define CSR_TDATA3_DATA_OFFSET 0 +#define CSR_TDATA3_DATA_LENGTH XLEN +#define CSR_TDATA3_DATA (((1L << XLEN) - 1) << CSR_TDATA3_DATA_OFFSET) +#define CSR_TINFO 0x7a4 +/* + * One bit for each possible \FcsrTdataOneType enumerated in \RcsrTdataOne. Bit N + * corresponds to type N. If the bit is set, then that type is + * supported by the currently selected trigger. + * + * If the currently selected trigger doesn't exist, this field + * contains 1. + */ +#define CSR_TINFO_INFO_OFFSET 0 +#define CSR_TINFO_INFO_LENGTH 16 +#define CSR_TINFO_INFO (0xffffULL << CSR_TINFO_INFO_OFFSET) +#define CSR_TCONTROL 0x7a5 +/* + * M-mode previous trigger enable field. + * + * \FcsrTcontrolMpte and \FcsrTcontrolMte provide one solution to a problem + * regarding triggers with action=0 firing in M-mode trap handlers. See + * Section~\ref{sec:nativetrigger} for more details. + * + * When a trap into M-mode is taken, \FcsrTcontrolMpte is set to the value of + * \FcsrTcontrolMte. + */ +#define CSR_TCONTROL_MPTE_OFFSET 7 +#define CSR_TCONTROL_MPTE_LENGTH 1 +#define CSR_TCONTROL_MPTE (0x1ULL << CSR_TCONTROL_MPTE_OFFSET) +/* + * M-mode trigger enable field. + * + * 0: Triggers with action=0 do not match/fire while the hart is in M-mode. + * + * 1: Triggers do match/fire while the hart is in M-mode. + * + * When a trap into M-mode is taken, \FcsrTcontrolMte is set to 0. When {\tt + * mret} is executed, \FcsrTcontrolMte is set to the value of \FcsrTcontrolMpte. + */ +#define CSR_TCONTROL_MTE_OFFSET 3 +#define CSR_TCONTROL_MTE_LENGTH 1 +#define CSR_TCONTROL_MTE (0x1ULL << CSR_TCONTROL_MTE_OFFSET) +#define CSR_HCONTEXT 0x6a8 +/* + * Hypervisor mode software can write a context number to this register, + * which can be used to set triggers that only fire in that specific + * context. + * + * An implementation may tie any number of upper bits in this field to + * 0. 
If the H extension is not implemented, it's recommended to implement + * no more than 6 bits on RV32 and 13 on RV64 (as visible through the + * \RcsrMcontext register). If the H extension is implemented, + * it's recommended to implement no more than 7 bits on RV32 + * and 14 on RV64. + */ +#define CSR_HCONTEXT_HCONTEXT_OFFSET 0 +#define CSR_HCONTEXT_HCONTEXT_LENGTH XLEN +#define CSR_HCONTEXT_HCONTEXT (((1L << XLEN) - 1) << CSR_HCONTEXT_HCONTEXT_OFFSET) +#define CSR_SCONTEXT 0x5a8 +/* + * Supervisor mode software can write a context number to this + * register, which can be used to set triggers that only fire in that + * specific context. + * + * An implementation may tie any number of high bits in this field to + * 0. It's recommended to implement no more than 16 bits on RV32, and + * 34 on RV64. + */ +#define CSR_SCONTEXT_DATA_OFFSET 0 +#define CSR_SCONTEXT_DATA_LENGTH XLEN +#define CSR_SCONTEXT_DATA (((1L << XLEN) - 1) << CSR_SCONTEXT_DATA_OFFSET) +#define CSR_MCONTEXT 0x7a8 +#define CSR_MSCONTEXT 0x7aa +#define CSR_MCONTROL 0x7a1 +#define CSR_MCONTROL_TYPE_OFFSET (XLEN-4) +#define CSR_MCONTROL_TYPE_LENGTH 4 +#define CSR_MCONTROL_TYPE (0xfULL << CSR_MCONTROL_TYPE_OFFSET) +#define CSR_MCONTROL_DMODE_OFFSET (XLEN-5) +#define CSR_MCONTROL_DMODE_LENGTH 1 +#define CSR_MCONTROL_DMODE (0x1ULL << CSR_MCONTROL_DMODE_OFFSET) +/* + * Specifies the largest naturally aligned powers-of-two (NAPOT) range + * supported by the hardware when \FcsrMcontrolMatch is 1. The value is the + * logarithm base 2 of the number of bytes in that range. + * A value of 0 indicates \FcsrMcontrolMatch 1 is not supported. + * A value of 63 corresponds to the maximum NAPOT range, which is + * $2^{63}$ bytes in size. + */ +#define CSR_MCONTROL_MASKMAX_OFFSET (XLEN-11) +#define CSR_MCONTROL_MASKMAX_LENGTH 6 +#define CSR_MCONTROL_MASKMAX (0x3fULL << CSR_MCONTROL_MASKMAX_OFFSET) +/* + * This field only exists when XLEN is at least 64. + * It contains the 2 high bits of the access size. The low bits + * come from \FcsrMcontrolSizelo. See \FcsrMcontrolSizelo for how this + * is used. + */ +#define CSR_MCONTROL_SIZEHI_OFFSET 21 +#define CSR_MCONTROL_SIZEHI_LENGTH 2 +#define CSR_MCONTROL_SIZEHI (0x3ULL << CSR_MCONTROL_SIZEHI_OFFSET) +/* + * If this bit is implemented then it must become set when this + * trigger fires and may become set when this trigger matches. + * The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_MCONTROL_HIT_OFFSET 20 +#define CSR_MCONTROL_HIT_LENGTH 1 +#define CSR_MCONTROL_HIT (0x1ULL << CSR_MCONTROL_HIT_OFFSET) +/* + * This bit determines the contents of the XLEN-bit compare values. + * + * 0: There is at least one compare value and it contains the lowest + * virtual address of the access. + * It is recommended that there are additional compare values for + * the other accessed virtual addresses. + * (E.g. on a 32-bit read from 0x4000, the lowest address is 0x4000 + * and the other addresses are 0x4001, 0x4002, and 0x4003.) + * + * 1: There is exactly one compare value and it contains the data + * value loaded or stored, or the instruction executed. + * Any bits beyond the size of the data access will contain 0. 
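+ *
+ * For example (a minimal, non-normative sketch; write_csr and the
+ * variables are hypothetical), an address watchpoint programs select=0
+ * with the address in tdata2, while a data-value watchpoint programs
+ * select=1 with the expected data in tdata2:
+ *
+ *   write_csr(CSR_TDATA2, watch_address); // select=0: compare the address
+ *   write_csr(CSR_TDATA2, watch_value);   // select=1: compare the data value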
+ */ +#define CSR_MCONTROL_SELECT_OFFSET 19 +#define CSR_MCONTROL_SELECT_LENGTH 1 +#define CSR_MCONTROL_SELECT (0x1ULL << CSR_MCONTROL_SELECT_OFFSET) +/* + * 0: The action for this trigger will be taken just before the + * instruction that triggered it is committed, but after all preceding + * instructions are committed. \Rxepc or \RcsrDpc (depending + * on \FcsrMcontrolAction) must be set to the virtual address of the + * instruction that matched. + * + * If this is combined with \FcsrMcontrolLoad and + * \FcsrMcontrolSelect=1 then a memory access will be + * performed (including any side effects of performing such an access) even + * though the load will not update its destination register. Debuggers + * should consider this when setting such breakpoints on, for example, + * memory-mapped I/O addresses. + * + * 1: The action for this trigger will be taken after the instruction + * that triggered it is committed. It should be taken before the next + * instruction is committed, but it is better to implement triggers imprecisely + * than to not implement them at all. \Rxepc or + * \RcsrDpc (depending on \FcsrMcontrolAction) must be set to + * the virtual address of the next instruction that must be executed to + * preserve the program flow. + * + * Most hardware will only implement one timing or the other, possibly + * dependent on \FcsrMcontrolSelect, \FcsrMcontrolExecute, + * \FcsrMcontrolLoad, and \FcsrMcontrolStore. This bit + * primarily exists for the hardware to communicate to the debugger + * what will happen. Hardware may implement the bit fully writable, in + * which case the debugger has a little more control. + * + * Data load triggers with \FcsrMcontrolTiming of 0 will result in the same load + * happening again when the debugger lets the hart run. For data load + * triggers, debuggers must first attempt to set the breakpoint with + * \FcsrMcontrolTiming of 1. + * + * If a trigger with \FcsrMcontrolTiming of 0 matches, it is + * implementation-dependent whether that prevents a trigger with + * \FcsrMcontrolTiming of 1 matching as well. + */ +#define CSR_MCONTROL_TIMING_OFFSET 18 +#define CSR_MCONTROL_TIMING_LENGTH 1 +#define CSR_MCONTROL_TIMING (0x1ULL << CSR_MCONTROL_TIMING_OFFSET) +/* + * This field contains the 2 low bits of the access size. The high bits come + * from \FcsrMcontrolSizehi. The combined value is interpreted as follows: + * + * 0: The trigger will attempt to match against an access of any size. + * The behavior is only well-defined if $|select|=0$, or if the access + * size is XLEN. + * + * 1: The trigger will only match against 8-bit memory accesses. + * + * 2: The trigger will only match against 16-bit memory accesses or + * execution of 16-bit instructions. + * + * 3: The trigger will only match against 32-bit memory accesses or + * execution of 32-bit instructions. + * + * 4: The trigger will only match against execution of 48-bit instructions. + * + * 5: The trigger will only match against 64-bit memory accesses or + * execution of 64-bit instructions. + * + * 6: The trigger will only match against execution of 80-bit instructions. + * + * 7: The trigger will only match against execution of 96-bit instructions. + * + * 8: The trigger will only match against execution of 112-bit instructions. + * + * 9: The trigger will only match against 128-bit memory accesses or + * execution of 128-bit instructions. + * + * An implementation must support the value of 0, but all other values + * are optional. 
When an implementation supports address triggers + * (\FcsrMcontrolSelect=0), it is recommended that those triggers + * support every access size that the hart supports, as well as for + * every instruction size that the hart supports. + * + * Implementations such as RV32D or RV64V are able to perform loads + * and stores that are wider than XLEN. Custom extensions may also + * support instructions that are wider than XLEN. Because + * \RcsrTdataTwo is of size XLEN, there is a known limitation that + * data value triggers (\FcsrMcontrolSelect=1) can only be supported + * for access sizes up to XLEN bits. When an implementation supports + * data value triggers (\FcsrMcontrolSelect=1), it is recommended + * that those triggers support every access size up to XLEN that the + * hart supports, as well as for every instruction length up to XLEN + * that the hart supports. + */ +#define CSR_MCONTROL_SIZELO_OFFSET 16 +#define CSR_MCONTROL_SIZELO_LENGTH 2 +#define CSR_MCONTROL_SIZELO (0x3ULL << CSR_MCONTROL_SIZELO_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_MCONTROL_ACTION_OFFSET 12 +#define CSR_MCONTROL_ACTION_LENGTH 4 +#define CSR_MCONTROL_ACTION (0xfULL << CSR_MCONTROL_ACTION_OFFSET) +/* + * 0: When this trigger matches, the configured action is taken. + * + * 1: While this trigger does not match, it prevents the trigger with + * the next index from matching. + * + * A trigger chain starts on the first trigger with $|chain|=1$ after + * a trigger with $|chain|=0$, or simply on the first trigger if that + * has $|chain|=1$. It ends on the first trigger after that which has + * $|chain|=0$. This final trigger is part of the chain. The action + * on all but the final trigger is ignored. The action on that final + * trigger will be taken if and only if all the triggers in the chain + * match at the same time. + * + * Debuggers should not terminate a chain with a trigger with a + * different type. It is undefined when exactly such a chain fires. + * + * Because \FcsrMcontrolChain affects the next trigger, hardware must zero it in + * writes to \RcsrMcontrol that set \FcsrTdataOneDmode to 0 if the next trigger has + * \FcsrTdataOneDmode of 1. + * In addition hardware should ignore writes to \RcsrMcontrol that set + * \FcsrTdataOneDmode to 1 if the previous trigger has both \FcsrTdataOneDmode of 0 and + * \FcsrMcontrolChain of 1. Debuggers must avoid the latter case by checking + * \FcsrMcontrolChain on the previous trigger if they're writing \RcsrMcontrol. + * + * Implementations that wish to limit the maximum length of a trigger + * chain (eg. to meet timing requirements) may do so by zeroing + * \FcsrMcontrolChain in writes to \RcsrMcontrol that would make the chain too long. + */ +#define CSR_MCONTROL_CHAIN_OFFSET 11 +#define CSR_MCONTROL_CHAIN_LENGTH 1 +#define CSR_MCONTROL_CHAIN (0x1ULL << CSR_MCONTROL_CHAIN_OFFSET) +/* + * 0: Matches when any compare value equals \RcsrTdataTwo. + * + * 1: Matches when the top $M$ bits of any compare value match the top + * $M$ bits of \RcsrTdataTwo. + * $M$ is $|XLEN|-1$ minus the index of the least-significant + * bit containing 0 in \RcsrTdataTwo. Debuggers should only write values + * to \RcsrTdataTwo such that $M + $\FcsrMcontrolMaskmax$ \geq |XLEN|$ + * and $M\gt0$ , otherwise it's undefined on what conditions the + * trigger will match. + * + * 2: Matches when any compare value is greater than (unsigned) or + * equal to \RcsrTdataTwo. 
+ * + * 3: Matches when any compare value is less than (unsigned) + * \RcsrTdataTwo. + * + * 4: Matches when $\frac{|XLEN|}{2}-1$:$0$ of any compare value + * equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after + * $\frac{|XLEN|}{2}-1$:$0$ of the compare value is ANDed with + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo. + * + * 5: Matches when $|XLEN|-1$:$\frac{|XLEN|}{2}$ of any compare + * value equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of the compare value is ANDed with + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo. + * + * 8: Matches when \FcsrMcontrolMatch$=0$ would not match. + * + * 9: Matches when \FcsrMcontrolMatch$=1$ would not match. + * + * 12: Matches when \FcsrMcontrolMatch$=4$ would not match. + * + * 13: Matches when \FcsrMcontrolMatch$=5$ would not match. + * + * Other values are reserved for future use. + * + * All comparisons only look at the lower XLEN (in the current mode) + * bits of the compare values and of \RcsrTdataTwo. + * When \FcsrMcontrolSelect=1 and access size is N, this is further + * reduced, and comparisons only look at the lower N bits of the + * compare values and of \RcsrTdataTwo. + */ +#define CSR_MCONTROL_MATCH_OFFSET 7 +#define CSR_MCONTROL_MATCH_LENGTH 4 +#define CSR_MCONTROL_MATCH (0xfULL << CSR_MCONTROL_MATCH_OFFSET) +/* + * When set, enable this trigger in M-mode. + */ +#define CSR_MCONTROL_M_OFFSET 6 +#define CSR_MCONTROL_M_LENGTH 1 +#define CSR_MCONTROL_M (0x1ULL << CSR_MCONTROL_M_OFFSET) +/* + * When set, enable this trigger in S/HS-mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_MCONTROL_S_OFFSET 4 +#define CSR_MCONTROL_S_LENGTH 1 +#define CSR_MCONTROL_S (0x1ULL << CSR_MCONTROL_S_OFFSET) +/* + * When set, enable this trigger in U-mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_MCONTROL_U_OFFSET 3 +#define CSR_MCONTROL_U_LENGTH 1 +#define CSR_MCONTROL_U (0x1ULL << CSR_MCONTROL_U_OFFSET) +/* + * When set, the trigger fires on the virtual address or opcode of an + * instruction that is executed. + */ +#define CSR_MCONTROL_EXECUTE_OFFSET 2 +#define CSR_MCONTROL_EXECUTE_LENGTH 1 +#define CSR_MCONTROL_EXECUTE (0x1ULL << CSR_MCONTROL_EXECUTE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * store. + */ +#define CSR_MCONTROL_STORE_OFFSET 1 +#define CSR_MCONTROL_STORE_LENGTH 1 +#define CSR_MCONTROL_STORE (0x1ULL << CSR_MCONTROL_STORE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * load. + */ +#define CSR_MCONTROL_LOAD_OFFSET 0 +#define CSR_MCONTROL_LOAD_LENGTH 1 +#define CSR_MCONTROL_LOAD (0x1ULL << CSR_MCONTROL_LOAD_OFFSET) +#define CSR_MCONTROL6 0x7a1 +#define CSR_MCONTROL6_TYPE_OFFSET (XLEN-4) +#define CSR_MCONTROL6_TYPE_LENGTH 4 +#define CSR_MCONTROL6_TYPE (0xfULL << CSR_MCONTROL6_TYPE_OFFSET) +#define CSR_MCONTROL6_DMODE_OFFSET (XLEN-5) +#define CSR_MCONTROL6_DMODE_LENGTH 1 +#define CSR_MCONTROL6_DMODE (0x1ULL << CSR_MCONTROL6_DMODE_OFFSET) +/* + * When set, enable this trigger in VS-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_MCONTROL6_VS_OFFSET 24 +#define CSR_MCONTROL6_VS_LENGTH 1 +#define CSR_MCONTROL6_VS (0x1ULL << CSR_MCONTROL6_VS_OFFSET) +/* + * When set, enable this trigger in VU-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. 
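+ *
+ * As a non-normative convenience, a debugger that wants a trigger
+ * active in every implemented mode can OR all mode-enable bits
+ * together and let the unsupported, hard-wired-to-0 bits drop out
+ * (tdata1_val is a hypothetical local):
+ *
+ *   tdata1_val |= CSR_MCONTROL6_M | CSR_MCONTROL6_S | CSR_MCONTROL6_U
+ *               | CSR_MCONTROL6_VS | CSR_MCONTROL6_VU;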
+ */
+#define CSR_MCONTROL6_VU_OFFSET 23
+#define CSR_MCONTROL6_VU_LENGTH 1
+#define CSR_MCONTROL6_VU (0x1ULL << CSR_MCONTROL6_VU_OFFSET)
+/*
+ * If this bit is implemented then it must become set when this
+ * trigger fires and may become set when this trigger matches.
+ * The trigger's user can set or clear it at any
+ * time. It is used to determine which
+ * trigger(s) matched. If the bit is not implemented, it is always 0
+ * and writing it has no effect.
+ */
+#define CSR_MCONTROL6_HIT_OFFSET 22
+#define CSR_MCONTROL6_HIT_LENGTH 1
+#define CSR_MCONTROL6_HIT (0x1ULL << CSR_MCONTROL6_HIT_OFFSET)
+/*
+ * This bit determines the contents of the XLEN-bit compare values.
+ *
+ * 0: There is at least one compare value and it contains the lowest
+ * virtual address of the access.
+ * In addition, it is recommended that there are additional compare
+ * values for the other accessed virtual addresses.
+ * (E.g. on a 32-bit read from 0x4000, the lowest address is 0x4000
+ * and the other addresses are 0x4001, 0x4002, and 0x4003.)
+ *
+ * 1: There is exactly one compare value and it contains the data
+ * value loaded or stored, or the instruction executed.
+ * Any bits beyond the size of the data access will contain 0.
+ */
+#define CSR_MCONTROL6_SELECT_OFFSET 21
+#define CSR_MCONTROL6_SELECT_LENGTH 1
+#define CSR_MCONTROL6_SELECT (0x1ULL << CSR_MCONTROL6_SELECT_OFFSET)
+/*
+ * 0: The action for this trigger will be taken just before the
+ * instruction that triggered it is committed, but after all preceding
+ * instructions are committed. \Rxepc or \RcsrDpc (depending
+ * on \FcsrMcontrolSixAction) must be set to the virtual address of the
+ * instruction that matched.
+ *
+ * If this is combined with \FcsrMcontrolSixLoad and
+ * \FcsrMcontrolSixSelect=1 then a memory access will be
+ * performed (including any side effects of performing such an access) even
+ * though the load will not update its destination register. Debuggers
+ * should consider this when setting such breakpoints on, for example,
+ * memory-mapped I/O addresses.
+ *
+ * 1: The action for this trigger will be taken after the instruction
+ * that triggered it is committed. It should be taken before the next
+ * instruction is committed, but it is better to implement triggers imprecisely
+ * than to not implement them at all. \Rxepc or
+ * \RcsrDpc (depending on \FcsrMcontrolSixAction) must be set to
+ * the virtual address of the next instruction that must be executed to
+ * preserve the program flow.
+ *
+ * Most hardware will only implement one timing or the other, possibly
+ * dependent on \FcsrMcontrolSixSelect, \FcsrMcontrolSixExecute,
+ * \FcsrMcontrolSixLoad, and \FcsrMcontrolSixStore. This bit
+ * primarily exists for the hardware to communicate to the debugger
+ * what will happen. Hardware may implement the bit fully writable, in
+ * which case the debugger has a little more control.
+ *
+ * Data load triggers with \FcsrMcontrolSixTiming of 0 will result in the same load
+ * happening again when the debugger lets the hart run. For data load
+ * triggers, debuggers must first attempt to set the breakpoint with
+ * \FcsrMcontrolSixTiming of 1.
+ *
+ * If a trigger with \FcsrMcontrolSixTiming of 0 matches, it is
+ * implementation-dependent whether that prevents a trigger with
+ * \FcsrMcontrolSixTiming of 1 matching as well.
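+ *
+ * A debugger-side probe of this bit might look like the following
+ * non-normative sketch (read_csr/write_csr are hypothetical helpers):
+ *
+ *   write_csr(CSR_TDATA1, tdata1_val | CSR_MCONTROL6_TIMING); // request timing=1
+ *   if (!(read_csr(CSR_TDATA1) & CSR_MCONTROL6_TIMING)) {
+ *       // Hardware only supports timing=0 here; expect the matching
+ *       // load to be replayed when the hart is resumed.
+ *   }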
+ */ +#define CSR_MCONTROL6_TIMING_OFFSET 20 +#define CSR_MCONTROL6_TIMING_LENGTH 1 +#define CSR_MCONTROL6_TIMING (0x1ULL << CSR_MCONTROL6_TIMING_OFFSET) +/* + * 0: The trigger will attempt to match against an access of any size. + * The behavior is only well-defined if $|select|=0$, or if the access + * size is XLEN. + * + * 1: The trigger will only match against 8-bit memory accesses. + * + * 2: The trigger will only match against 16-bit memory accesses or + * execution of 16-bit instructions. + * + * 3: The trigger will only match against 32-bit memory accesses or + * execution of 32-bit instructions. + * + * 4: The trigger will only match against execution of 48-bit instructions. + * + * 5: The trigger will only match against 64-bit memory accesses or + * execution of 64-bit instructions. + * + * 6: The trigger will only match against execution of 80-bit instructions. + * + * 7: The trigger will only match against execution of 96-bit instructions. + * + * 8: The trigger will only match against execution of 112-bit instructions. + * + * 9: The trigger will only match against 128-bit memory accesses or + * execution of 128-bit instructions. + * + * An implementation must support the value of 0, but all other values + * are optional. When an implementation supports address triggers + * (\FcsrMcontrolSixSelect=0), it is recommended that those triggers + * support every access size that the hart supports, as well as for + * every instruction size that the hart supports. + * + * Implementations such as RV32D or RV64V are able to perform loads + * and stores that are wider than XLEN. Custom extensions may also + * support instructions that are wider than XLEN. Because + * \RcsrTdataTwo is of size XLEN, there is a known limitation that + * data value triggers (\FcsrMcontrolSixSelect=1) can only be supported + * for access sizes up to XLEN bits. When an implementation supports + * data value triggers (\FcsrMcontrolSixSelect=1), it is recommended + * that those triggers support every access size up to XLEN that the + * hart supports, as well as for every instruction length up to XLEN + * that the hart supports. + */ +#define CSR_MCONTROL6_SIZE_OFFSET 16 +#define CSR_MCONTROL6_SIZE_LENGTH 4 +#define CSR_MCONTROL6_SIZE (0xfULL << CSR_MCONTROL6_SIZE_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_MCONTROL6_ACTION_OFFSET 12 +#define CSR_MCONTROL6_ACTION_LENGTH 4 +#define CSR_MCONTROL6_ACTION (0xfULL << CSR_MCONTROL6_ACTION_OFFSET) +/* + * 0: When this trigger matches, the configured action is taken. + * + * 1: While this trigger does not match, it prevents the trigger with + * the next index from matching. + * + * A trigger chain starts on the first trigger with $|chain|=1$ after + * a trigger with $|chain|=0$, or simply on the first trigger if that + * has $|chain|=1$. It ends on the first trigger after that which has + * $|chain|=0$. This final trigger is part of the chain. The action + * on all but the final trigger is ignored. The action on that final + * trigger will be taken if and only if all the triggers in the chain + * match at the same time. + * + * Debuggers should not terminate a chain with a trigger with a + * different type. It is undefined when exactly such a chain fires. + * + * Because \FcsrMcontrolSixChain affects the next trigger, hardware must zero it in + * writes to \RcsrMcontrolSix that set \FcsrTdataOneDmode to 0 if the next trigger has + * \FcsrTdataOneDmode of 1. 
+ * In addition hardware should ignore writes to \RcsrMcontrolSix that set
+ * \FcsrTdataOneDmode to 1 if the previous trigger has both \FcsrTdataOneDmode of 0 and
+ * \FcsrMcontrolSixChain of 1. Debuggers must avoid the latter case by checking
+ * \FcsrMcontrolSixChain on the previous trigger if they're writing \RcsrMcontrolSix.
+ *
+ * Implementations that wish to limit the maximum length of a trigger
+ * chain (eg. to meet timing requirements) may do so by zeroing
+ * \FcsrMcontrolSixChain in writes to \RcsrMcontrolSix that would make the chain too long.
+ */
+#define CSR_MCONTROL6_CHAIN_OFFSET 11
+#define CSR_MCONTROL6_CHAIN_LENGTH 1
+#define CSR_MCONTROL6_CHAIN (0x1ULL << CSR_MCONTROL6_CHAIN_OFFSET)
+/*
+ * 0: Matches when any compare value equals \RcsrTdataTwo.
+ *
+ * 1: Matches when the top $M$ bits of any compare value match the top
+ * $M$ bits of \RcsrTdataTwo.
+ * $M$ is $|XLEN|-1$ minus the index of the least-significant bit
+ * containing 0 in \RcsrTdataTwo.
+ * \RcsrTdataTwo is WARL and bit $|maskmax6|-1$ will be set to 0 if no
+ * less significant bits are written with 0.
+ * Legal values for \RcsrTdataTwo require $M + |maskmax6| \geq |XLEN|$ and $M\gt0$.
+ * See above for how to determine maskmax6.
+ *
+ * 2: Matches when any compare value is greater than (unsigned) or
+ * equal to \RcsrTdataTwo.
+ *
+ * 3: Matches when any compare value is less than (unsigned)
+ * \RcsrTdataTwo.
+ *
+ * 4: Matches when $\frac{|XLEN|}{2}-1$:$0$ of any compare value
+ * equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after
+ * $\frac{|XLEN|}{2}-1$:$0$ of the compare value is ANDed with
+ * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo.
+ *
+ * 5: Matches when $|XLEN|-1$:$\frac{|XLEN|}{2}$ of any compare
+ * value equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after
+ * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of the compare value is ANDed with
+ * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo.
+ *
+ * 8: Matches when \FcsrMcontrolSixMatch$=0$ would not match.
+ *
+ * 9: Matches when \FcsrMcontrolSixMatch$=1$ would not match.
+ *
+ * 12: Matches when \FcsrMcontrolSixMatch$=4$ would not match.
+ *
+ * 13: Matches when \FcsrMcontrolSixMatch$=5$ would not match.
+ *
+ * Other values are reserved for future use.
+ *
+ * All comparisons only look at the lower XLEN (in the current mode)
+ * bits of the compare values and of \RcsrTdataTwo.
+ * When \FcsrMcontrolSixSelect=1 and access size is N, this is further
+ * reduced, and comparisons only look at the lower N bits of the
+ * compare values and of \RcsrTdataTwo.
+ */
+#define CSR_MCONTROL6_MATCH_OFFSET 7
+#define CSR_MCONTROL6_MATCH_LENGTH 4
+#define CSR_MCONTROL6_MATCH (0xfULL << CSR_MCONTROL6_MATCH_OFFSET)
+/*
+ * When set, enable this trigger in M-mode.
+ */
+#define CSR_MCONTROL6_M_OFFSET 6
+#define CSR_MCONTROL6_M_LENGTH 1
+#define CSR_MCONTROL6_M (0x1ULL << CSR_MCONTROL6_M_OFFSET)
+/*
+ * When set, enable this trigger in S/HS-mode.
+ * This bit is hard-wired to 0 if the hart does not support
+ * S-mode.
+ */
+#define CSR_MCONTROL6_S_OFFSET 4
+#define CSR_MCONTROL6_S_LENGTH 1
+#define CSR_MCONTROL6_S (0x1ULL << CSR_MCONTROL6_S_OFFSET)
+/*
+ * When set, enable this trigger in U-mode.
+ * This bit is hard-wired to 0 if the hart does not support
+ * U-mode.
+ */
+#define CSR_MCONTROL6_U_OFFSET 3
+#define CSR_MCONTROL6_U_LENGTH 1
+#define CSR_MCONTROL6_U (0x1ULL << CSR_MCONTROL6_U_OFFSET)
+/*
+ * When set, the trigger fires on the virtual address or opcode of an
+ * instruction that is executed.
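+ *
+ * Putting the fields together, a minimal, non-normative sketch of an
+ * instruction breakpoint using this trigger type (write_csr is a
+ * hypothetical helper; sel is a free trigger index):
+ *
+ *   write_csr(CSR_TSELECT, sel);
+ *   write_csr(CSR_TDATA2, breakpoint_pc);           // address to match
+ *   write_csr(CSR_TDATA1,
+ *             (6ULL << CSR_MCONTROL6_TYPE_OFFSET)   // type 6: mcontrol6
+ *           | CSR_MCONTROL6_DMODE
+ *           | (1ULL << CSR_MCONTROL6_ACTION_OFFSET) // action 1: enter Debug Mode
+ *           | CSR_MCONTROL6_M | CSR_MCONTROL6_S | CSR_MCONTROL6_U
+ *           | CSR_MCONTROL6_EXECUTE);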
+ */ +#define CSR_MCONTROL6_EXECUTE_OFFSET 2 +#define CSR_MCONTROL6_EXECUTE_LENGTH 1 +#define CSR_MCONTROL6_EXECUTE (0x1ULL << CSR_MCONTROL6_EXECUTE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * store. + */ +#define CSR_MCONTROL6_STORE_OFFSET 1 +#define CSR_MCONTROL6_STORE_LENGTH 1 +#define CSR_MCONTROL6_STORE (0x1ULL << CSR_MCONTROL6_STORE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * load. + */ +#define CSR_MCONTROL6_LOAD_OFFSET 0 +#define CSR_MCONTROL6_LOAD_LENGTH 1 +#define CSR_MCONTROL6_LOAD (0x1ULL << CSR_MCONTROL6_LOAD_OFFSET) +#define CSR_ICOUNT 0x7a1 +#define CSR_ICOUNT_TYPE_OFFSET (XLEN-4) +#define CSR_ICOUNT_TYPE_LENGTH 4 +#define CSR_ICOUNT_TYPE (0xfULL << CSR_ICOUNT_TYPE_OFFSET) +#define CSR_ICOUNT_DMODE_OFFSET (XLEN-5) +#define CSR_ICOUNT_DMODE_LENGTH 1 +#define CSR_ICOUNT_DMODE (0x1ULL << CSR_ICOUNT_DMODE_OFFSET) +/* + * When set, enable this trigger in VS-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ICOUNT_VS_OFFSET 26 +#define CSR_ICOUNT_VS_LENGTH 1 +#define CSR_ICOUNT_VS (0x1ULL << CSR_ICOUNT_VS_OFFSET) +/* + * When set, enable this trigger in VU-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ICOUNT_VU_OFFSET 25 +#define CSR_ICOUNT_VU_LENGTH 1 +#define CSR_ICOUNT_VU (0x1ULL << CSR_ICOUNT_VU_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_ICOUNT_HIT_OFFSET 24 +#define CSR_ICOUNT_HIT_LENGTH 1 +#define CSR_ICOUNT_HIT (0x1ULL << CSR_ICOUNT_HIT_OFFSET) +/* + * When count is decremented to 0, the trigger fires. Instead of + * changing \FcsrIcountCount from 1 to 0, it is also acceptable for hardware to + * clear \FcsrIcountM, \FcsrIcountS, \FcsrIcountU, \FcsrIcountVs, and + * \FcsrIcountVu. This allows \FcsrIcountCount to be hard-wired + * to 1 if this register just exists for single step. + */ +#define CSR_ICOUNT_COUNT_OFFSET 10 +#define CSR_ICOUNT_COUNT_LENGTH 14 +#define CSR_ICOUNT_COUNT (0x3fffULL << CSR_ICOUNT_COUNT_OFFSET) +/* + * When set, enable this trigger in M-mode. + */ +#define CSR_ICOUNT_M_OFFSET 9 +#define CSR_ICOUNT_M_LENGTH 1 +#define CSR_ICOUNT_M (0x1ULL << CSR_ICOUNT_M_OFFSET) +/* + * This bit becomes set when \FcsrIcountCount is decremented from 1 + * to 0. It is cleared when the trigger fires. + */ +#define CSR_ICOUNT_PENDING_OFFSET 8 +#define CSR_ICOUNT_PENDING_LENGTH 1 +#define CSR_ICOUNT_PENDING (0x1ULL << CSR_ICOUNT_PENDING_OFFSET) +/* + * When set, enable this trigger in S/HS-mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_ICOUNT_S_OFFSET 7 +#define CSR_ICOUNT_S_LENGTH 1 +#define CSR_ICOUNT_S (0x1ULL << CSR_ICOUNT_S_OFFSET) +/* + * When set, enable this trigger in U-mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_ICOUNT_U_OFFSET 6 +#define CSR_ICOUNT_U_LENGTH 1 +#define CSR_ICOUNT_U (0x1ULL << CSR_ICOUNT_U_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. 
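+ *
+ * For instance (non-normative; write_csr is a hypothetical helper), a
+ * single-instruction step of M-mode code that enters Debug Mode when
+ * it fires (action=1) could be requested as:
+ *
+ *   write_csr(CSR_TDATA1, (3ULL << CSR_ICOUNT_TYPE_OFFSET)  // type 3: icount
+ *                       | (1ULL << CSR_ICOUNT_COUNT_OFFSET) // count=1
+ *                       | CSR_ICOUNT_M
+ *                       | (1ULL << CSR_ICOUNT_ACTION_OFFSET));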
+ */ +#define CSR_ICOUNT_ACTION_OFFSET 0 +#define CSR_ICOUNT_ACTION_LENGTH 6 +#define CSR_ICOUNT_ACTION (0x3fULL << CSR_ICOUNT_ACTION_OFFSET) +#define CSR_ITRIGGER 0x7a1 +#define CSR_ITRIGGER_TYPE_OFFSET (XLEN-4) +#define CSR_ITRIGGER_TYPE_LENGTH 4 +#define CSR_ITRIGGER_TYPE (0xfULL << CSR_ITRIGGER_TYPE_OFFSET) +#define CSR_ITRIGGER_DMODE_OFFSET (XLEN-5) +#define CSR_ITRIGGER_DMODE_LENGTH 1 +#define CSR_ITRIGGER_DMODE (0x1ULL << CSR_ITRIGGER_DMODE_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_ITRIGGER_HIT_OFFSET (XLEN-6) +#define CSR_ITRIGGER_HIT_LENGTH 1 +#define CSR_ITRIGGER_HIT (0x1ULL << CSR_ITRIGGER_HIT_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from VS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ITRIGGER_VS_OFFSET 12 +#define CSR_ITRIGGER_VS_LENGTH 1 +#define CSR_ITRIGGER_VS (0x1ULL << CSR_ITRIGGER_VS_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from VU + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ITRIGGER_VU_OFFSET 11 +#define CSR_ITRIGGER_VU_LENGTH 1 +#define CSR_ITRIGGER_VU (0x1ULL << CSR_ITRIGGER_VU_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from M + * mode. + */ +#define CSR_ITRIGGER_M_OFFSET 9 +#define CSR_ITRIGGER_M_LENGTH 1 +#define CSR_ITRIGGER_M (0x1ULL << CSR_ITRIGGER_M_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from S/HS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_ITRIGGER_S_OFFSET 7 +#define CSR_ITRIGGER_S_LENGTH 1 +#define CSR_ITRIGGER_S (0x1ULL << CSR_ITRIGGER_S_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from U + * mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_ITRIGGER_U_OFFSET 6 +#define CSR_ITRIGGER_U_LENGTH 1 +#define CSR_ITRIGGER_U (0x1ULL << CSR_ITRIGGER_U_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_ITRIGGER_ACTION_OFFSET 0 +#define CSR_ITRIGGER_ACTION_LENGTH 6 +#define CSR_ITRIGGER_ACTION (0x3fULL << CSR_ITRIGGER_ACTION_OFFSET) +#define CSR_ETRIGGER 0x7a1 +#define CSR_ETRIGGER_TYPE_OFFSET (XLEN-4) +#define CSR_ETRIGGER_TYPE_LENGTH 4 +#define CSR_ETRIGGER_TYPE (0xfULL << CSR_ETRIGGER_TYPE_OFFSET) +#define CSR_ETRIGGER_DMODE_OFFSET (XLEN-5) +#define CSR_ETRIGGER_DMODE_LENGTH 1 +#define CSR_ETRIGGER_DMODE (0x1ULL << CSR_ETRIGGER_DMODE_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_ETRIGGER_HIT_OFFSET (XLEN-6) +#define CSR_ETRIGGER_HIT_LENGTH 1 +#define CSR_ETRIGGER_HIT (0x1ULL << CSR_ETRIGGER_HIT_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from VS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. 
+ */ +#define CSR_ETRIGGER_VS_OFFSET 12 +#define CSR_ETRIGGER_VS_LENGTH 1 +#define CSR_ETRIGGER_VS (0x1ULL << CSR_ETRIGGER_VS_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from VU + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ETRIGGER_VU_OFFSET 11 +#define CSR_ETRIGGER_VU_LENGTH 1 +#define CSR_ETRIGGER_VU (0x1ULL << CSR_ETRIGGER_VU_OFFSET) +/* + * When set, non-maskable interrupts cause this + * trigger to fire, regardless of the values of \FcsrEtriggerM, + * \FcsrEtriggerS, \FcsrEtriggerU, \FcsrEtriggerVs, and \FcsrEtriggerVu. + */ +#define CSR_ETRIGGER_NMI_OFFSET 10 +#define CSR_ETRIGGER_NMI_LENGTH 1 +#define CSR_ETRIGGER_NMI (0x1ULL << CSR_ETRIGGER_NMI_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from M + * mode. + */ +#define CSR_ETRIGGER_M_OFFSET 9 +#define CSR_ETRIGGER_M_LENGTH 1 +#define CSR_ETRIGGER_M (0x1ULL << CSR_ETRIGGER_M_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from S/HS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_ETRIGGER_S_OFFSET 7 +#define CSR_ETRIGGER_S_LENGTH 1 +#define CSR_ETRIGGER_S (0x1ULL << CSR_ETRIGGER_S_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from U + * mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_ETRIGGER_U_OFFSET 6 +#define CSR_ETRIGGER_U_LENGTH 1 +#define CSR_ETRIGGER_U (0x1ULL << CSR_ETRIGGER_U_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_ETRIGGER_ACTION_OFFSET 0 +#define CSR_ETRIGGER_ACTION_LENGTH 6 +#define CSR_ETRIGGER_ACTION (0x3fULL << CSR_ETRIGGER_ACTION_OFFSET) +#define CSR_TMEXTTRIGGER 0x7a1 +#define CSR_TMEXTTRIGGER_TYPE_OFFSET (XLEN-4) +#define CSR_TMEXTTRIGGER_TYPE_LENGTH 4 +#define CSR_TMEXTTRIGGER_TYPE (0xfULL << CSR_TMEXTTRIGGER_TYPE_OFFSET) +#define CSR_TMEXTTRIGGER_DMODE_OFFSET (XLEN-5) +#define CSR_TMEXTTRIGGER_DMODE_LENGTH 1 +#define CSR_TMEXTTRIGGER_DMODE (0x1ULL << CSR_TMEXTTRIGGER_DMODE_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_TMEXTTRIGGER_HIT_OFFSET (XLEN-6) +#define CSR_TMEXTTRIGGER_HIT_LENGTH 1 +#define CSR_TMEXTTRIGGER_HIT (0x1ULL << CSR_TMEXTTRIGGER_HIT_OFFSET) +/* + * This optional bit, when set, causes this trigger to fire whenever an attached + * interrupt controller signals a trigger. + */ +#define CSR_TMEXTTRIGGER_INTCTL_OFFSET 22 +#define CSR_TMEXTTRIGGER_INTCTL_LENGTH 1 +#define CSR_TMEXTTRIGGER_INTCTL (0x1ULL << CSR_TMEXTTRIGGER_INTCTL_OFFSET) +/* + * Selects any combination of up to 16 external debug trigger inputs + * that cause this trigger to fire. + */ +#define CSR_TMEXTTRIGGER_SELECT_OFFSET 6 +#define CSR_TMEXTTRIGGER_SELECT_LENGTH 16 +#define CSR_TMEXTTRIGGER_SELECT (0xffffULL << CSR_TMEXTTRIGGER_SELECT_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_TMEXTTRIGGER_ACTION_OFFSET 0 +#define CSR_TMEXTTRIGGER_ACTION_LENGTH 6 +#define CSR_TMEXTTRIGGER_ACTION (0x3fULL << CSR_TMEXTTRIGGER_ACTION_OFFSET) +#define CSR_TEXTRA32 0x7a3 +/* + * Data used together with \FcsrTextraThirtytwoMhselect. 
+ */ +#define CSR_TEXTRA32_MHVALUE_OFFSET 26 +#define CSR_TEXTRA32_MHVALUE_LENGTH 6 +#define CSR_TEXTRA32_MHVALUE (0x3fU << CSR_TEXTRA32_MHVALUE_OFFSET) +/* + * 0: Ignore \FcsrTextraThirtytwoMhvalue. + * + * 4: This trigger will only match if the low bits of + * \RcsrMcontext/\RcsrHcontext equal \FcsrTextraThirtytwoMhvalue. + * + * 1, 5: This trigger will only match if the low bits of + * \RcsrMcontext/\RcsrHcontext equal \{\FcsrTextraThirtytwoMhvalue, mhselect[2]\}. + * + * 2, 6: This trigger will only match if VMID in hgatp equals the lower VMIDMAX + * (defined in the Privileged Spec) bits of \{\FcsrTextraThirtytwoMhvalue, mhselect[2]\}. + * + * 3, 7: Reserved. + * + * If the H extension is not supported, the only legal values are 0 and 4. + */ +#define CSR_TEXTRA32_MHSELECT_OFFSET 23 +#define CSR_TEXTRA32_MHSELECT_LENGTH 3 +#define CSR_TEXTRA32_MHSELECT (0x7U << CSR_TEXTRA32_MHSELECT_OFFSET) +/* + * When the least significant bit of this field is 1, it causes bits 7:0 + * in the comparison to be ignored, when \FcsrTextraThirtytwoSselect=1. + * When the next most significant bit of this field is 1, it causes bits 15:8 + * to be ignored in the comparison, when \FcsrTextraThirtytwoSselect=1. + */ +#define CSR_TEXTRA32_SBYTEMASK_OFFSET 18 +#define CSR_TEXTRA32_SBYTEMASK_LENGTH 2 +#define CSR_TEXTRA32_SBYTEMASK (0x3U << CSR_TEXTRA32_SBYTEMASK_OFFSET) +/* + * Data used together with \FcsrTextraThirtytwoSselect. + * + * This field should be tied to 0 when S-mode is not supported. + */ +#define CSR_TEXTRA32_SVALUE_OFFSET 2 +#define CSR_TEXTRA32_SVALUE_LENGTH 16 +#define CSR_TEXTRA32_SVALUE (0xffffU << CSR_TEXTRA32_SVALUE_OFFSET) +/* + * 0: Ignore \FcsrTextraThirtytwoSvalue. + * + * 1: This trigger will only match if the low bits of + * \RcsrScontext equal \FcsrTextraThirtytwoSvalue. + * + * 2: This trigger will only match if: + * \begin{itemize}[noitemsep,nolistsep] + * \item the mode is VS-mode or VU-mode and ASID in \Rvsatp + * equals the lower ASIDMAX (defined in the Privileged Spec) bits + * of \FcsrTextraThirtytwoSvalue. + * \item in all other modes, ASID in \Rsatp equals the lower + * ASIDMAX (defined in the Privileged Spec) bits of + * \FcsrTextraThirtytwoSvalue. + * \end{itemize} + * + * This field should be tied to 0 when S-mode is not supported. + */ +#define CSR_TEXTRA32_SSELECT_OFFSET 0 +#define CSR_TEXTRA32_SSELECT_LENGTH 2 +#define CSR_TEXTRA32_SSELECT (0x3U << CSR_TEXTRA32_SSELECT_OFFSET) +#define CSR_TEXTRA64 0x7a3 +#define CSR_TEXTRA64_MHVALUE_OFFSET 51 +#define CSR_TEXTRA64_MHVALUE_LENGTH 13 +#define CSR_TEXTRA64_MHVALUE (0x1fffULL << CSR_TEXTRA64_MHVALUE_OFFSET) +#define CSR_TEXTRA64_MHSELECT_OFFSET 48 +#define CSR_TEXTRA64_MHSELECT_LENGTH 3 +#define CSR_TEXTRA64_MHSELECT (0x7ULL << CSR_TEXTRA64_MHSELECT_OFFSET) +/* + * When the least significant bit of this field is 1, it causes bits 7:0 + * in the comparison to be ignored, when \FcsrTextraSixtyfourSselect=1. + * Likewise, the second bit controls the comparison of bits 15:8, + * third bit controls the comparison of bits 23:16, + * fourth bit controls the comparison of bits 31:24, and + * fifth bit controls the comparison of bits 33:32. 
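+ *
+ * E.g.\ (non-normative) excluding bits 15:0 of the \RcsrScontext
+ * comparison sets the two low bits of this field (textra_val is a
+ * hypothetical local):
+ *
+ *   textra_val |= 0x3ULL << CSR_TEXTRA64_SBYTEMASK_OFFSET;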
+ */ +#define CSR_TEXTRA64_SBYTEMASK_OFFSET 36 +#define CSR_TEXTRA64_SBYTEMASK_LENGTH 5 +#define CSR_TEXTRA64_SBYTEMASK (0x1fULL << CSR_TEXTRA64_SBYTEMASK_OFFSET) +#define CSR_TEXTRA64_SVALUE_OFFSET 2 +#define CSR_TEXTRA64_SVALUE_LENGTH 34 +#define CSR_TEXTRA64_SVALUE (0x3ffffffffULL << CSR_TEXTRA64_SVALUE_OFFSET) +#define CSR_TEXTRA64_SSELECT_OFFSET 0 +#define CSR_TEXTRA64_SSELECT_LENGTH 2 +#define CSR_TEXTRA64_SSELECT (0x3ULL << CSR_TEXTRA64_SSELECT_OFFSET) +#define DM_DMSTATUS 0x11 +/* + * 0: Unimplemented, or \FdmDmcontrolNdmreset is zero and no ndmreset is currently + * in progress. + * + * 1: \FdmDmcontrolNdmreset is currently nonzero, or there is an ndmreset in progress. + */ +#define DM_DMSTATUS_NDMRESETPENDING_OFFSET 24 +#define DM_DMSTATUS_NDMRESETPENDING_LENGTH 1 +#define DM_DMSTATUS_NDMRESETPENDING (0x1U << DM_DMSTATUS_NDMRESETPENDING_OFFSET) +/* + * 0: The per-hart {\tt unavail} bits reflect the current state of the hart. + * + * 1: The per-hart {\tt unavail} bits are sticky. Once they are set, they will + * not clear until the debugger acknowledges them using \FdmDmcontrolAckunavail. + */ +#define DM_DMSTATUS_STICKYUNAVAIL_OFFSET 23 +#define DM_DMSTATUS_STICKYUNAVAIL_LENGTH 1 +#define DM_DMSTATUS_STICKYUNAVAIL (0x1U << DM_DMSTATUS_STICKYUNAVAIL_OFFSET) +/* + * If 1, then there is an implicit {\tt ebreak} instruction at the + * non-existent word immediately after the Program Buffer. This saves + * the debugger from having to write the {\tt ebreak} itself, and + * allows the Program Buffer to be one word smaller. + * + * This must be 1 when \FdmAbstractcsProgbufsize is 1. + */ +#define DM_DMSTATUS_IMPEBREAK_OFFSET 22 +#define DM_DMSTATUS_IMPEBREAK_LENGTH 1 +#define DM_DMSTATUS_IMPEBREAK (0x1U << DM_DMSTATUS_IMPEBREAK_OFFSET) +/* + * This field is 1 when all currently selected harts have been reset + * and reset has not been acknowledged for any of them. + */ +#define DM_DMSTATUS_ALLHAVERESET_OFFSET 19 +#define DM_DMSTATUS_ALLHAVERESET_LENGTH 1 +#define DM_DMSTATUS_ALLHAVERESET (0x1U << DM_DMSTATUS_ALLHAVERESET_OFFSET) +/* + * This field is 1 when at least one currently selected hart has been + * reset and reset has not been acknowledged for that hart. + */ +#define DM_DMSTATUS_ANYHAVERESET_OFFSET 18 +#define DM_DMSTATUS_ANYHAVERESET_LENGTH 1 +#define DM_DMSTATUS_ANYHAVERESET (0x1U << DM_DMSTATUS_ANYHAVERESET_OFFSET) +/* + * This field is 1 when all currently selected harts have their + * resume ack bit\index{resume ack bit} set. + */ +#define DM_DMSTATUS_ALLRESUMEACK_OFFSET 17 +#define DM_DMSTATUS_ALLRESUMEACK_LENGTH 1 +#define DM_DMSTATUS_ALLRESUMEACK (0x1U << DM_DMSTATUS_ALLRESUMEACK_OFFSET) +/* + * This field is 1 when any currently selected hart has its + * resume ack bit\index{resume ack bit} set. + */ +#define DM_DMSTATUS_ANYRESUMEACK_OFFSET 16 +#define DM_DMSTATUS_ANYRESUMEACK_LENGTH 1 +#define DM_DMSTATUS_ANYRESUMEACK (0x1U << DM_DMSTATUS_ANYRESUMEACK_OFFSET) +/* + * This field is 1 when all currently selected harts do not exist in + * this hardware platform. + */ +#define DM_DMSTATUS_ALLNONEXISTENT_OFFSET 15 +#define DM_DMSTATUS_ALLNONEXISTENT_LENGTH 1 +#define DM_DMSTATUS_ALLNONEXISTENT (0x1U << DM_DMSTATUS_ALLNONEXISTENT_OFFSET) +/* + * This field is 1 when any currently selected hart does not exist in + * this hardware platform. 
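+ *
+ * A debugger can lean on this bit to count harts, as in this
+ * non-normative sketch (select_hart programs hartsello/hartselhi in
+ * dmcontrol; dmi_read is a hypothetical DMI accessor):
+ *
+ *   unsigned nharts = 0;
+ *   for (;;) {
+ *       select_hart(nharts);
+ *       if (dmi_read(DM_DMSTATUS) & DM_DMSTATUS_ANYNONEXISTENT)
+ *           break; // exactly nharts harts exist
+ *       nharts++;
+ *   }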
+ */ +#define DM_DMSTATUS_ANYNONEXISTENT_OFFSET 14 +#define DM_DMSTATUS_ANYNONEXISTENT_LENGTH 1 +#define DM_DMSTATUS_ANYNONEXISTENT (0x1U << DM_DMSTATUS_ANYNONEXISTENT_OFFSET) +/* + * This field is 1 when all currently selected harts are + * unavailable, or (if \FdmDmstatusStickyunavail is 1) were + * unavailable without that being acknowledged. + */ +#define DM_DMSTATUS_ALLUNAVAIL_OFFSET 13 +#define DM_DMSTATUS_ALLUNAVAIL_LENGTH 1 +#define DM_DMSTATUS_ALLUNAVAIL (0x1U << DM_DMSTATUS_ALLUNAVAIL_OFFSET) +/* + * This field is 1 when any currently selected hart is unavailable, + * or (if \FdmDmstatusStickyunavail is 1) was unavailable without + * that being acknowledged. + */ +#define DM_DMSTATUS_ANYUNAVAIL_OFFSET 12 +#define DM_DMSTATUS_ANYUNAVAIL_LENGTH 1 +#define DM_DMSTATUS_ANYUNAVAIL (0x1U << DM_DMSTATUS_ANYUNAVAIL_OFFSET) +/* + * This field is 1 when all currently selected harts are running. + */ +#define DM_DMSTATUS_ALLRUNNING_OFFSET 11 +#define DM_DMSTATUS_ALLRUNNING_LENGTH 1 +#define DM_DMSTATUS_ALLRUNNING (0x1U << DM_DMSTATUS_ALLRUNNING_OFFSET) +/* + * This field is 1 when any currently selected hart is running. + */ +#define DM_DMSTATUS_ANYRUNNING_OFFSET 10 +#define DM_DMSTATUS_ANYRUNNING_LENGTH 1 +#define DM_DMSTATUS_ANYRUNNING (0x1U << DM_DMSTATUS_ANYRUNNING_OFFSET) +/* + * This field is 1 when all currently selected harts are halted. + */ +#define DM_DMSTATUS_ALLHALTED_OFFSET 9 +#define DM_DMSTATUS_ALLHALTED_LENGTH 1 +#define DM_DMSTATUS_ALLHALTED (0x1U << DM_DMSTATUS_ALLHALTED_OFFSET) +/* + * This field is 1 when any currently selected hart is halted. + */ +#define DM_DMSTATUS_ANYHALTED_OFFSET 8 +#define DM_DMSTATUS_ANYHALTED_LENGTH 1 +#define DM_DMSTATUS_ANYHALTED (0x1U << DM_DMSTATUS_ANYHALTED_OFFSET) +/* + * 0: Authentication is required before using the DM. + * + * 1: The authentication check has passed. + * + * On components that don't implement authentication, this bit must be + * preset as 1. + */ +#define DM_DMSTATUS_AUTHENTICATED_OFFSET 7 +#define DM_DMSTATUS_AUTHENTICATED_LENGTH 1 +#define DM_DMSTATUS_AUTHENTICATED (0x1U << DM_DMSTATUS_AUTHENTICATED_OFFSET) +/* + * 0: The authentication module is ready to process the next + * read/write to \RdmAuthdata. + * + * 1: The authentication module is busy. Accessing \RdmAuthdata results + * in unspecified behavior. + * + * \FdmDmstatusAuthbusy only becomes set in immediate response to an access to + * \RdmAuthdata. + */ +#define DM_DMSTATUS_AUTHBUSY_OFFSET 6 +#define DM_DMSTATUS_AUTHBUSY_LENGTH 1 +#define DM_DMSTATUS_AUTHBUSY (0x1U << DM_DMSTATUS_AUTHBUSY_OFFSET) +/* + * 1 if this Debug Module supports halt-on-reset functionality + * controllable by the \FdmDmcontrolSetresethaltreq and \FdmDmcontrolClrresethaltreq bits. + * 0 otherwise. + */ +#define DM_DMSTATUS_HASRESETHALTREQ_OFFSET 5 +#define DM_DMSTATUS_HASRESETHALTREQ_LENGTH 1 +#define DM_DMSTATUS_HASRESETHALTREQ (0x1U << DM_DMSTATUS_HASRESETHALTREQ_OFFSET) +/* + * 0: \RdmConfstrptrZero--\RdmConfstrptrThree hold information which + * is not relevant to the configuration string. + * + * 1: \RdmConfstrptrZero--\RdmConfstrptrThree hold the address of the + * configuration string. + */ +#define DM_DMSTATUS_CONFSTRPTRVALID_OFFSET 4 +#define DM_DMSTATUS_CONFSTRPTRVALID_LENGTH 1 +#define DM_DMSTATUS_CONFSTRPTRVALID (0x1U << DM_DMSTATUS_CONFSTRPTRVALID_OFFSET) +/* + * 0: There is no Debug Module present. + * + * 1: There is a Debug Module and it conforms to version 0.11 of this + * specification. 
+ * + * 2: There is a Debug Module and it conforms to version 0.13 of this + * specification. + * + * 3: There is a Debug Module and it conforms to version 1.0 of this + * specification. + * + * 15: There is a Debug Module but it does not conform to any + * available version of this spec. + */ +#define DM_DMSTATUS_VERSION_OFFSET 0 +#define DM_DMSTATUS_VERSION_LENGTH 4 +#define DM_DMSTATUS_VERSION (0xfU << DM_DMSTATUS_VERSION_OFFSET) +#define DM_DMCONTROL 0x10 +/* + * Writing 0 clears the halt request bit for all currently selected + * harts. This may cancel outstanding halt requests for those harts. + * + * Writing 1 sets the halt request bit for all currently selected + * harts. Running harts will halt whenever their halt request bit is + * set. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_HALTREQ_OFFSET 31 +#define DM_DMCONTROL_HALTREQ_LENGTH 1 +#define DM_DMCONTROL_HALTREQ (0x1U << DM_DMCONTROL_HALTREQ_OFFSET) +/* + * Writing 1 causes the currently selected harts to resume once, if + * they are halted when the write occurs. It also clears the resume + * ack bit for those harts. + * + * \FdmDmcontrolResumereq is ignored if \FdmDmcontrolHaltreq is set. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_RESUMEREQ_OFFSET 30 +#define DM_DMCONTROL_RESUMEREQ_LENGTH 1 +#define DM_DMCONTROL_RESUMEREQ (0x1U << DM_DMCONTROL_RESUMEREQ_OFFSET) +/* + * This optional field writes the reset bit for all the currently + * selected harts. To perform a reset the debugger writes 1, and then + * writes 0 to deassert the reset signal. + * + * While this bit is 1, the debugger must not change which harts are + * selected. + * + * If this feature is not implemented, the bit always stays 0, so + * after writing 1 the debugger can read the register back to see if + * the feature is supported. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_HARTRESET_OFFSET 29 +#define DM_DMCONTROL_HARTRESET_LENGTH 1 +#define DM_DMCONTROL_HARTRESET (0x1U << DM_DMCONTROL_HARTRESET_OFFSET) +/* + * 0: No effect. + * + * 1: Clears {\tt havereset} for any selected harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_ACKHAVERESET_OFFSET 28 +#define DM_DMCONTROL_ACKHAVERESET_LENGTH 1 +#define DM_DMCONTROL_ACKHAVERESET (0x1U << DM_DMCONTROL_ACKHAVERESET_OFFSET) +/* + * 0: No effect. + * + * 1: Clears {\tt unavail} for any selected harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_ACKUNAVAIL_OFFSET 27 +#define DM_DMCONTROL_ACKUNAVAIL_LENGTH 1 +#define DM_DMCONTROL_ACKUNAVAIL (0x1U << DM_DMCONTROL_ACKUNAVAIL_OFFSET) +/* + * Selects the definition of currently selected harts. + * + * 0: There is a single currently selected hart, that is selected by \Fhartsel. + * + * 1: There may be multiple currently selected harts -- the hart + * selected by \Fhartsel, plus those selected by the hart array mask + * register. + * + * An implementation which does not implement the hart array mask register + * must tie this field to 0. A debugger which wishes to use the hart array + * mask register feature should set this bit and read back to see if the functionality + * is supported. 
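+ *
+ * A non-normative probe of this feature (dmi_read/dmi_write are
+ * hypothetical DMI accessors):
+ *
+ *   dmi_write(DM_DMCONTROL, dmi_read(DM_DMCONTROL) | DM_DMCONTROL_HASEL);
+ *   if (dmi_read(DM_DMCONTROL) & DM_DMCONTROL_HASEL) {
+ *       // the hart array mask register is implemented
+ *   }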
+ */ +#define DM_DMCONTROL_HASEL_OFFSET 26 +#define DM_DMCONTROL_HASEL_LENGTH 1 +#define DM_DMCONTROL_HASEL (0x1U << DM_DMCONTROL_HASEL_OFFSET) +/* + * The low 10 bits of \Fhartsel: the DM-specific index of the hart to + * select. This hart is always part of the currently selected harts. + */ +#define DM_DMCONTROL_HARTSELLO_OFFSET 16 +#define DM_DMCONTROL_HARTSELLO_LENGTH 10 +#define DM_DMCONTROL_HARTSELLO (0x3ffU << DM_DMCONTROL_HARTSELLO_OFFSET) +/* + * The high 10 bits of \Fhartsel: the DM-specific index of the hart to + * select. This hart is always part of the currently selected harts. + */ +#define DM_DMCONTROL_HARTSELHI_OFFSET 6 +#define DM_DMCONTROL_HARTSELHI_LENGTH 10 +#define DM_DMCONTROL_HARTSELHI (0x3ffU << DM_DMCONTROL_HARTSELHI_OFFSET) +/* + * This optional field sets \Fkeepalive for all currently selected + * harts, unless \FdmDmcontrolClrkeepalive is simultaneously set to + * 1. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_SETKEEPALIVE_OFFSET 5 +#define DM_DMCONTROL_SETKEEPALIVE_LENGTH 1 +#define DM_DMCONTROL_SETKEEPALIVE (0x1U << DM_DMCONTROL_SETKEEPALIVE_OFFSET) +/* + * This optional field clears \Fkeepalive for all currently selected + * harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_CLRKEEPALIVE_OFFSET 4 +#define DM_DMCONTROL_CLRKEEPALIVE_LENGTH 1 +#define DM_DMCONTROL_CLRKEEPALIVE (0x1U << DM_DMCONTROL_CLRKEEPALIVE_OFFSET) +/* + * This optional field writes the halt-on-reset request bit for all + * currently selected harts, unless \FdmDmcontrolClrresethaltreq is + * simultaneously set to 1. + * When set to 1, each selected hart will halt upon the next deassertion + * of its reset. The halt-on-reset request bit is not automatically + * cleared. The debugger must write to \FdmDmcontrolClrresethaltreq to clear it. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + * + * If \FdmDmstatusHasresethaltreq is 0, this field is not implemented. + */ +#define DM_DMCONTROL_SETRESETHALTREQ_OFFSET 3 +#define DM_DMCONTROL_SETRESETHALTREQ_LENGTH 1 +#define DM_DMCONTROL_SETRESETHALTREQ (0x1U << DM_DMCONTROL_SETRESETHALTREQ_OFFSET) +/* + * This optional field clears the halt-on-reset request bit for all + * currently selected harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_CLRRESETHALTREQ_OFFSET 2 +#define DM_DMCONTROL_CLRRESETHALTREQ_LENGTH 1 +#define DM_DMCONTROL_CLRRESETHALTREQ (0x1U << DM_DMCONTROL_CLRRESETHALTREQ_OFFSET) +/* + * This bit controls the reset signal from the DM to the rest of the + * hardware platform. The signal should reset every part of the hardware platform, including + * every hart, except for the DM and any logic required to access the + * DM. + * To perform a hardware platform reset the debugger writes 1, + * and then writes 0 + * to deassert the reset. + */ +#define DM_DMCONTROL_NDMRESET_OFFSET 1 +#define DM_DMCONTROL_NDMRESET_LENGTH 1 +#define DM_DMCONTROL_NDMRESET (0x1U << DM_DMCONTROL_NDMRESET_OFFSET) +/* + * This bit serves as a reset signal for the Debug Module itself. + * After changing the value of this bit, the debugger must poll + * \RdmDmcontrol until \FdmDmcontrolDmactive has taken the requested value + * before performing any action that assumes the requested \FdmDmcontrolDmactive + * state change has completed. 
Hardware may + * take an arbitrarily long time to complete activation or deactivation and will + * indicate completion by setting \FdmDmcontrolDmactive to the requested value. + * + * 0: The module's state, including authentication mechanism, + * takes its reset values (the \FdmDmcontrolDmactive bit is the only bit which can + * be written to something other than its reset value). Any accesses + * to the module may fail. Specifically, \FdmDmstatusVersion might not return + * correct data. + * + * 1: The module functions normally. + * + * No other mechanism should exist that may result in resetting the + * Debug Module after power up. + * + * To place the Debug Module into a known state, a debugger may write 0 to \FdmDmcontrolDmactive, + * poll until \FdmDmcontrolDmactive is observed 0, write 1 to \FdmDmcontrolDmactive, and + * poll until \FdmDmcontrolDmactive is observed 1. + * + * Implementations may pay attention to this bit to further aid + * debugging, for example by preventing the Debug Module from being + * power gated while debugging is active. + */ +#define DM_DMCONTROL_DMACTIVE_OFFSET 0 +#define DM_DMCONTROL_DMACTIVE_LENGTH 1 +#define DM_DMCONTROL_DMACTIVE (0x1U << DM_DMCONTROL_DMACTIVE_OFFSET) +#define DM_HARTINFO 0x12 +/* + * Number of {\tt dscratch} registers available for the debugger + * to use during program buffer execution, starting from \RcsrDscratchZero. + * The debugger can make no assumptions about the contents of these + * registers between commands. + */ +#define DM_HARTINFO_NSCRATCH_OFFSET 20 +#define DM_HARTINFO_NSCRATCH_LENGTH 4 +#define DM_HARTINFO_NSCRATCH (0xfU << DM_HARTINFO_NSCRATCH_OFFSET) +/* + * 0: The {\tt data} registers are shadowed in the hart by CSRs. + * Each CSR is DXLEN bits in size, and corresponds + * to a single argument, per Table~\ref{tab:datareg}. + * + * 1: The {\tt data} registers are shadowed in the hart's memory map. + * Each register takes up 4 bytes in the memory map. + */ +#define DM_HARTINFO_DATAACCESS_OFFSET 16 +#define DM_HARTINFO_DATAACCESS_LENGTH 1 +#define DM_HARTINFO_DATAACCESS (0x1U << DM_HARTINFO_DATAACCESS_OFFSET) +/* + * If \FdmHartinfoDataaccess is 0: Number of CSRs dedicated to + * shadowing the {\tt data} registers. + * + * If \FdmHartinfoDataaccess is 1: Number of 32-bit words in the memory map + * dedicated to shadowing the {\tt data} registers. + * + * Since there are at most 12 {\tt data} registers, the value in this + * register must be 12 or smaller. + */ +#define DM_HARTINFO_DATASIZE_OFFSET 12 +#define DM_HARTINFO_DATASIZE_LENGTH 4 +#define DM_HARTINFO_DATASIZE (0xfU << DM_HARTINFO_DATASIZE_OFFSET) +/* + * If \FdmHartinfoDataaccess is 0: The number of the first CSR dedicated to + * shadowing the {\tt data} registers. + * + * If \FdmHartinfoDataaccess is 1: Address of RAM where the data + * registers are shadowed. This address is sign extended giving a + * range of -2048 to 2047, easily addressed with a load or store using + * \Xzero as the address register. + */ +#define DM_HARTINFO_DATAADDR_OFFSET 0 +#define DM_HARTINFO_DATAADDR_LENGTH 12 +#define DM_HARTINFO_DATAADDR (0xfffU << DM_HARTINFO_DATAADDR_OFFSET) +#define DM_HAWINDOWSEL 0x14 +/* + * The high bits of this field may be tied to 0, depending on how large + * the array mask register is. E.g.\ on a hardware platform with 48 harts only bit 0 + * of this field may actually be writable. 
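+ *
+ * E.g.\ (non-normative; dmi_read/dmi_write are hypothetical) adding
+ * hart 40 to the array mask on that 48-hart platform selects window 1
+ * and sets bit 8, since 40 = 1*32 + 8:
+ *
+ *   dmi_write(DM_HAWINDOWSEL, 40 / 32);
+ *   dmi_write(DM_HAWINDOW, dmi_read(DM_HAWINDOW) | (1U << (40 % 32)));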
+ */
+#define DM_HAWINDOWSEL_HAWINDOWSEL_OFFSET 0
+#define DM_HAWINDOWSEL_HAWINDOWSEL_LENGTH 15
+#define DM_HAWINDOWSEL_HAWINDOWSEL (0x7fffU << DM_HAWINDOWSEL_HAWINDOWSEL_OFFSET)
+#define DM_HAWINDOW 0x15
+#define DM_HAWINDOW_MASKDATA_OFFSET 0
+#define DM_HAWINDOW_MASKDATA_LENGTH 32
+#define DM_HAWINDOW_MASKDATA (0xffffffffU << DM_HAWINDOW_MASKDATA_OFFSET)
+#define DM_ABSTRACTCS 0x16
+/*
+ * Size of the Program Buffer, in 32-bit words. Valid sizes are 0--16.
+ */
+#define DM_ABSTRACTCS_PROGBUFSIZE_OFFSET 24
+#define DM_ABSTRACTCS_PROGBUFSIZE_LENGTH 5
+#define DM_ABSTRACTCS_PROGBUFSIZE (0x1fU << DM_ABSTRACTCS_PROGBUFSIZE_OFFSET)
+/*
+ * 1: An abstract command is currently being executed.
+ *
+ * This bit is set as soon as \RdmCommand is written, and is
+ * not cleared until that command has completed.
+ */
+#define DM_ABSTRACTCS_BUSY_OFFSET 12
+#define DM_ABSTRACTCS_BUSY_LENGTH 1
+#define DM_ABSTRACTCS_BUSY (0x1U << DM_ABSTRACTCS_BUSY_OFFSET)
+/*
+ * This optional bit controls whether program buffer and abstract
+ * memory accesses are performed with the exact and full set of
+ * permission checks that apply based on the current architectural
+ * state of the hart performing the access, or with a relaxed set of
+ * permission checks (e.g. PMP restrictions are ignored). The
+ * details of the latter are implementation-specific. When set to 0,
+ * full permissions apply; when set to 1, relaxed permissions apply.
+ */
+#define DM_ABSTRACTCS_RELAXEDPRIV_OFFSET 11
+#define DM_ABSTRACTCS_RELAXEDPRIV_LENGTH 1
+#define DM_ABSTRACTCS_RELAXEDPRIV (0x1U << DM_ABSTRACTCS_RELAXEDPRIV_OFFSET)
+/*
+ * Gets set if an abstract command fails. The bits in this field remain set until
+ * they are cleared by writing 1 to them. No abstract command is
+ * started until the value is reset to 0.
+ *
+ * This field only contains a valid value if \FdmAbstractcsBusy is 0.
+ *
+ * 0 (none): No error.
+ *
+ * 1 (busy): An abstract command was executing while \RdmCommand,
+ * \RdmAbstractcs, or \RdmAbstractauto was written, or when one
+ * of the {\tt data} or {\tt progbuf} registers was read or written.
+ * This status is only written if \FdmAbstractcsCmderr contains 0.
+ *
+ * 2 (not supported): The command in \RdmCommand is not supported. It
+ * may be supported with different options set, but it will not be
+ * supported at a later time when the hart or system state is
+ * different.
+ *
+ * 3 (exception): An exception occurred while executing the command
+ * (e.g.\ while executing the Program Buffer).
+ *
+ * 4 (halt/resume): The abstract command couldn't execute because the
+ * hart wasn't in the required state (running/halted), or was unavailable.
+ *
+ * 5 (bus): The abstract command failed due to a bus error (e.g.\
+ * alignment, access size, or timeout).
+ *
+ * 6: Reserved for future use.
+ *
+ * 7 (other): The command failed for another reason.
+ */
+#define DM_ABSTRACTCS_CMDERR_OFFSET 8
+#define DM_ABSTRACTCS_CMDERR_LENGTH 3
+#define DM_ABSTRACTCS_CMDERR (0x7U << DM_ABSTRACTCS_CMDERR_OFFSET)
+/*
+ * Number of {\tt data} registers that are implemented as part of the
+ * abstract command interface. Valid sizes are 1--12.
+ */
+#define DM_ABSTRACTCS_DATACOUNT_OFFSET 0
+#define DM_ABSTRACTCS_DATACOUNT_LENGTH 4
+#define DM_ABSTRACTCS_DATACOUNT (0xfU << DM_ABSTRACTCS_DATACOUNT_OFFSET)
+#define DM_COMMAND 0x17
+/*
+ * The type determines the overall functionality of this
+ * abstract command.
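+ *
+ * For example (illustrative only), an Access Register command that
+ * reads 64-bit GPR s0 into arg0 would be encoded as cmdtype=0,
+ * aarsize=3, transfer=1, write=0, regno=0x1008, i.e. a write of
+ * 0x00321008 to this register.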
+ */ +#define DM_COMMAND_CMDTYPE_OFFSET 24 +#define DM_COMMAND_CMDTYPE_LENGTH 8 +#define DM_COMMAND_CMDTYPE (0xffU << DM_COMMAND_CMDTYPE_OFFSET) +/* + * This field is interpreted in a command-specific manner, + * described for each abstract command. + */ +#define DM_COMMAND_CONTROL_OFFSET 0 +#define DM_COMMAND_CONTROL_LENGTH 24 +#define DM_COMMAND_CONTROL (0xffffffU << DM_COMMAND_CONTROL_OFFSET) +#define DM_ABSTRACTAUTO 0x18 +/* + * When a bit in this field is 1, read or write accesses to the + * corresponding {\tt progbuf} word cause the DM to act as if the + * current value in \RdmCommand was written there again after the + * access to {\tt progbuf} completes. + */ +#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET 16 +#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF_LENGTH 16 +#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF (0xffffU << DM_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET) +/* + * When a bit in this field is 1, read or write accesses to the + * corresponding {\tt data} word cause the DM to act as if the current + * value in \RdmCommand was written there again after the + * access to {\tt data} completes. + */ +#define DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET 0 +#define DM_ABSTRACTAUTO_AUTOEXECDATA_LENGTH 12 +#define DM_ABSTRACTAUTO_AUTOEXECDATA (0xfffU << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) +#define DM_CONFSTRPTR0 0x19 +#define DM_CONFSTRPTR0_ADDR_OFFSET 0 +#define DM_CONFSTRPTR0_ADDR_LENGTH 32 +#define DM_CONFSTRPTR0_ADDR (0xffffffffU << DM_CONFSTRPTR0_ADDR_OFFSET) +#define DM_CONFSTRPTR1 0x1a +#define DM_CONFSTRPTR1_ADDR_OFFSET 0 +#define DM_CONFSTRPTR1_ADDR_LENGTH 32 +#define DM_CONFSTRPTR1_ADDR (0xffffffffU << DM_CONFSTRPTR1_ADDR_OFFSET) +#define DM_CONFSTRPTR2 0x1b +#define DM_CONFSTRPTR2_ADDR_OFFSET 0 +#define DM_CONFSTRPTR2_ADDR_LENGTH 32 +#define DM_CONFSTRPTR2_ADDR (0xffffffffU << DM_CONFSTRPTR2_ADDR_OFFSET) +#define DM_CONFSTRPTR3 0x1c +#define DM_CONFSTRPTR3_ADDR_OFFSET 0 +#define DM_CONFSTRPTR3_ADDR_LENGTH 32 +#define DM_CONFSTRPTR3_ADDR (0xffffffffU << DM_CONFSTRPTR3_ADDR_OFFSET) +#define DM_NEXTDM 0x1d +#define DM_NEXTDM_ADDR_OFFSET 0 +#define DM_NEXTDM_ADDR_LENGTH 32 +#define DM_NEXTDM_ADDR (0xffffffffU << DM_NEXTDM_ADDR_OFFSET) +#define DM_DATA0 0x04 +#define DM_DATA0_DATA_OFFSET 0 +#define DM_DATA0_DATA_LENGTH 32 +#define DM_DATA0_DATA (0xffffffffU << DM_DATA0_DATA_OFFSET) +#define DM_DATA1 0x05 +#define DM_DATA2 0x06 +#define DM_DATA3 0x07 +#define DM_DATA4 0x08 +#define DM_DATA5 0x09 +#define DM_DATA6 0x0a +#define DM_DATA7 0x0b +#define DM_DATA8 0x0c +#define DM_DATA9 0x0d +#define DM_DATA10 0x0e +#define DM_DATA11 0x0f +#define DM_PROGBUF0 0x20 +#define DM_PROGBUF0_DATA_OFFSET 0 +#define DM_PROGBUF0_DATA_LENGTH 32 +#define DM_PROGBUF0_DATA (0xffffffffU << DM_PROGBUF0_DATA_OFFSET) +#define DM_PROGBUF1 0x21 +#define DM_PROGBUF2 0x22 +#define DM_PROGBUF3 0x23 +#define DM_PROGBUF4 0x24 +#define DM_PROGBUF5 0x25 +#define DM_PROGBUF6 0x26 +#define DM_PROGBUF7 0x27 +#define DM_PROGBUF8 0x28 +#define DM_PROGBUF9 0x29 +#define DM_PROGBUF10 0x2a +#define DM_PROGBUF11 0x2b +#define DM_PROGBUF12 0x2c +#define DM_PROGBUF13 0x2d +#define DM_PROGBUF14 0x2e +#define DM_PROGBUF15 0x2f +#define DM_AUTHDATA 0x30 +#define DM_AUTHDATA_DATA_OFFSET 0 +#define DM_AUTHDATA_DATA_LENGTH 32 +#define DM_AUTHDATA_DATA (0xffffffffU << DM_AUTHDATA_DATA_OFFSET) +#define DM_DMCS2 0x32 +/* + * 0: The remaining fields in this register configure halt groups. + * + * 1: The remaining fields in this register configure resume groups. 
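+ *
+ * For example (illustrative only), a debugger can place the currently
+ * selected hart in halt group 1 by writing this register with
+ * grouptype=0, hgselect=0, hgwrite=1, and group=1.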
+ */ +#define DM_DMCS2_GROUPTYPE_OFFSET 11 +#define DM_DMCS2_GROUPTYPE_LENGTH 1 +#define DM_DMCS2_GROUPTYPE (0x1U << DM_DMCS2_GROUPTYPE_OFFSET) +/* + * This field contains the currently selected DM external trigger. + * + * If a non-existent trigger value is written here, the hardware will + * change it to a valid one or 0 if no DM external triggers exist. + */ +#define DM_DMCS2_DMEXTTRIGGER_OFFSET 7 +#define DM_DMCS2_DMEXTTRIGGER_LENGTH 4 +#define DM_DMCS2_DMEXTTRIGGER (0xfU << DM_DMCS2_DMEXTTRIGGER_OFFSET) +/* + * When \FdmDmcsTwoHgselect is 0, contains the group of the hart + * specified by \Fhartsel. + * + * When \FdmDmcsTwoHgselect is 1, contains the group of the DM external + * trigger selected by \FdmDmcsTwoDmexttrigger. + * + * The value written to this field is ignored unless \FdmDmcsTwoHgwrite + * is also written 1. + * + * Group numbers are contiguous starting at 0, with the highest number + * being implementation-dependent, and possibly different between + * different group types. Debuggers should read back this field after + * writing to confirm they are using a hart group that is supported. + * + * If groups aren't implemented, then this entire field is 0. + */ +#define DM_DMCS2_GROUP_OFFSET 2 +#define DM_DMCS2_GROUP_LENGTH 5 +#define DM_DMCS2_GROUP (0x1fU << DM_DMCS2_GROUP_OFFSET) +/* + * When 1 is written and \FdmDmcsTwoHgselect is 0, for every selected + * hart the DM will change its group to the value written to \FdmDmcsTwoGroup, + * if the hardware supports that group for that hart. + * Implementations may also change the group of a minimal set of + * unselected harts in the same way, if that is necessary due to + * a hardware limitation. + * + * When 1 is written and \FdmDmcsTwoHgselect is 1, the DM will change + * the group of the DM external trigger selected by \FdmDmcsTwoDmexttrigger + * to the value written to \FdmDmcsTwoGroup, if the hardware supports + * that group for that trigger. + * + * Writing 0 has no effect. + */ +#define DM_DMCS2_HGWRITE_OFFSET 1 +#define DM_DMCS2_HGWRITE_LENGTH 1 +#define DM_DMCS2_HGWRITE (0x1U << DM_DMCS2_HGWRITE_OFFSET) +/* + * 0: Operate on harts. + * + * 1: Operate on DM external triggers. + * + * If there are no DM external triggers, this field must be tied to 0. + */ +#define DM_DMCS2_HGSELECT_OFFSET 0 +#define DM_DMCS2_HGSELECT_LENGTH 1 +#define DM_DMCS2_HGSELECT (0x1U << DM_DMCS2_HGSELECT_OFFSET) +#define DM_HALTSUM0 0x40 +#define DM_HALTSUM0_HALTSUM0_OFFSET 0 +#define DM_HALTSUM0_HALTSUM0_LENGTH 32 +#define DM_HALTSUM0_HALTSUM0 (0xffffffffU << DM_HALTSUM0_HALTSUM0_OFFSET) +#define DM_HALTSUM1 0x13 +#define DM_HALTSUM1_HALTSUM1_OFFSET 0 +#define DM_HALTSUM1_HALTSUM1_LENGTH 32 +#define DM_HALTSUM1_HALTSUM1 (0xffffffffU << DM_HALTSUM1_HALTSUM1_OFFSET) +#define DM_HALTSUM2 0x34 +#define DM_HALTSUM2_HALTSUM2_OFFSET 0 +#define DM_HALTSUM2_HALTSUM2_LENGTH 32 +#define DM_HALTSUM2_HALTSUM2 (0xffffffffU << DM_HALTSUM2_HALTSUM2_OFFSET) +#define DM_HALTSUM3 0x35 +#define DM_HALTSUM3_HALTSUM3_OFFSET 0 +#define DM_HALTSUM3_HALTSUM3_LENGTH 32 +#define DM_HALTSUM3_HALTSUM3 (0xffffffffU << DM_HALTSUM3_HALTSUM3_OFFSET) +#define DM_SBCS 0x38 +/* + * 0: The System Bus interface conforms to mainline drafts of this + * spec older than 1 January, 2018. + * + * 1: The System Bus interface conforms to this version of the spec. + * + * Other values are reserved for future versions. 
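+ *
+ * As a usage sketch (not normative), a debugger can stream a block of
+ * 32-bit words with:
+ *    write sbcs:       sbreadonaddr=1, sbreadondata=1, sbautoincrement=1, sbaccess=2
+ *    write sbaddress0: start address (triggers the first read)
+ *    loop: read sbdata0 (each read fetches the next word)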
+ */ +#define DM_SBCS_SBVERSION_OFFSET 29 +#define DM_SBCS_SBVERSION_LENGTH 3 +#define DM_SBCS_SBVERSION (0x7U << DM_SBCS_SBVERSION_OFFSET) +/* + * Set when the debugger attempts to read data while a read is in + * progress, or when the debugger initiates a new access while one is + * already in progress (while \FdmSbcsSbbusy is set). It remains set until + * it's explicitly cleared by the debugger. + * + * While this field is set, no more system bus accesses can be + * initiated by the Debug Module. + */ +#define DM_SBCS_SBBUSYERROR_OFFSET 22 +#define DM_SBCS_SBBUSYERROR_LENGTH 1 +#define DM_SBCS_SBBUSYERROR (0x1U << DM_SBCS_SBBUSYERROR_OFFSET) +/* + * When 1, indicates the system bus master is busy. (Whether the + * system bus itself is busy is related, but not the same thing.) This + * bit goes high immediately when a read or write is requested for any + * reason, and does not go low until the access is fully completed. + * + * Writes to \RdmSbcs while \FdmSbcsSbbusy is high result in undefined + * behavior. A debugger must not write to \RdmSbcs until it reads + * \FdmSbcsSbbusy as 0. + */ +#define DM_SBCS_SBBUSY_OFFSET 21 +#define DM_SBCS_SBBUSY_LENGTH 1 +#define DM_SBCS_SBBUSY (0x1U << DM_SBCS_SBBUSY_OFFSET) +/* + * When 1, every write to \RdmSbaddressZero automatically triggers a + * system bus read at the new address. + */ +#define DM_SBCS_SBREADONADDR_OFFSET 20 +#define DM_SBCS_SBREADONADDR_LENGTH 1 +#define DM_SBCS_SBREADONADDR (0x1U << DM_SBCS_SBREADONADDR_OFFSET) +/* + * Select the access size to use for system bus accesses. + * + * 0: 8-bit + * + * 1: 16-bit + * + * 2: 32-bit + * + * 3: 64-bit + * + * 4: 128-bit + * + * If \FdmSbcsSbaccess has an unsupported value when the DM starts a bus + * access, the access is not performed and \FdmSbcsSberror is set to 4. + */ +#define DM_SBCS_SBACCESS_OFFSET 17 +#define DM_SBCS_SBACCESS_LENGTH 3 +#define DM_SBCS_SBACCESS (0x7U << DM_SBCS_SBACCESS_OFFSET) +/* + * When 1, {\tt sbaddress} is incremented by the access size (in + * bytes) selected in \FdmSbcsSbaccess after every system bus access. + */ +#define DM_SBCS_SBAUTOINCREMENT_OFFSET 16 +#define DM_SBCS_SBAUTOINCREMENT_LENGTH 1 +#define DM_SBCS_SBAUTOINCREMENT (0x1U << DM_SBCS_SBAUTOINCREMENT_OFFSET) +/* + * When 1, every read from \RdmSbdataZero automatically triggers a + * system bus read at the (possibly auto-incremented) address. + */ +#define DM_SBCS_SBREADONDATA_OFFSET 15 +#define DM_SBCS_SBREADONDATA_LENGTH 1 +#define DM_SBCS_SBREADONDATA (0x1U << DM_SBCS_SBREADONDATA_OFFSET) +/* + * When the Debug Module's system bus + * master encounters an error, this field gets set. The bits in this + * field remain set until they are cleared by writing 1 to them. + * While this field is non-zero, no more system bus accesses can be + * initiated by the Debug Module. + * + * An implementation may report ``Other'' (7) for any error condition. + * + * 0: There was no bus error. + * + * 1: There was a timeout. + * + * 2: A bad address was accessed. + * + * 3: There was an alignment error. + * + * 4: An access of unsupported size was requested. + * + * 7: Other. + */ +#define DM_SBCS_SBERROR_OFFSET 12 +#define DM_SBCS_SBERROR_LENGTH 3 +#define DM_SBCS_SBERROR (0x7U << DM_SBCS_SBERROR_OFFSET) +/* + * Width of system bus addresses in bits. (0 indicates there is no bus + * access support.) + */ +#define DM_SBCS_SBASIZE_OFFSET 5 +#define DM_SBCS_SBASIZE_LENGTH 7 +#define DM_SBCS_SBASIZE (0x7fU << DM_SBCS_SBASIZE_OFFSET) +/* + * 1 when 128-bit system bus accesses are supported. 
+ */ +#define DM_SBCS_SBACCESS128_OFFSET 4 +#define DM_SBCS_SBACCESS128_LENGTH 1 +#define DM_SBCS_SBACCESS128 (0x1U << DM_SBCS_SBACCESS128_OFFSET) +/* + * 1 when 64-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS64_OFFSET 3 +#define DM_SBCS_SBACCESS64_LENGTH 1 +#define DM_SBCS_SBACCESS64 (0x1U << DM_SBCS_SBACCESS64_OFFSET) +/* + * 1 when 32-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS32_OFFSET 2 +#define DM_SBCS_SBACCESS32_LENGTH 1 +#define DM_SBCS_SBACCESS32 (0x1U << DM_SBCS_SBACCESS32_OFFSET) +/* + * 1 when 16-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS16_OFFSET 1 +#define DM_SBCS_SBACCESS16_LENGTH 1 +#define DM_SBCS_SBACCESS16 (0x1U << DM_SBCS_SBACCESS16_OFFSET) +/* + * 1 when 8-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS8_OFFSET 0 +#define DM_SBCS_SBACCESS8_LENGTH 1 +#define DM_SBCS_SBACCESS8 (0x1U << DM_SBCS_SBACCESS8_OFFSET) +#define DM_SBADDRESS0 0x39 +/* + * Accesses bits 31:0 of the physical address in {\tt sbaddress}. + */ +#define DM_SBADDRESS0_ADDRESS_OFFSET 0 +#define DM_SBADDRESS0_ADDRESS_LENGTH 32 +#define DM_SBADDRESS0_ADDRESS (0xffffffffU << DM_SBADDRESS0_ADDRESS_OFFSET) +#define DM_SBADDRESS1 0x3a +/* + * Accesses bits 63:32 of the physical address in {\tt sbaddress} (if + * the system address bus is that wide). + */ +#define DM_SBADDRESS1_ADDRESS_OFFSET 0 +#define DM_SBADDRESS1_ADDRESS_LENGTH 32 +#define DM_SBADDRESS1_ADDRESS (0xffffffffU << DM_SBADDRESS1_ADDRESS_OFFSET) +#define DM_SBADDRESS2 0x3b +/* + * Accesses bits 95:64 of the physical address in {\tt sbaddress} (if + * the system address bus is that wide). + */ +#define DM_SBADDRESS2_ADDRESS_OFFSET 0 +#define DM_SBADDRESS2_ADDRESS_LENGTH 32 +#define DM_SBADDRESS2_ADDRESS (0xffffffffU << DM_SBADDRESS2_ADDRESS_OFFSET) +#define DM_SBADDRESS3 0x37 +/* + * Accesses bits 127:96 of the physical address in {\tt sbaddress} (if + * the system address bus is that wide). + */ +#define DM_SBADDRESS3_ADDRESS_OFFSET 0 +#define DM_SBADDRESS3_ADDRESS_LENGTH 32 +#define DM_SBADDRESS3_ADDRESS (0xffffffffU << DM_SBADDRESS3_ADDRESS_OFFSET) +#define DM_SBDATA0 0x3c +/* + * Accesses bits 31:0 of {\tt sbdata}. + */ +#define DM_SBDATA0_DATA_OFFSET 0 +#define DM_SBDATA0_DATA_LENGTH 32 +#define DM_SBDATA0_DATA (0xffffffffU << DM_SBDATA0_DATA_OFFSET) +#define DM_SBDATA1 0x3d +/* + * Accesses bits 63:32 of {\tt sbdata} (if the system bus is that + * wide). + */ +#define DM_SBDATA1_DATA_OFFSET 0 +#define DM_SBDATA1_DATA_LENGTH 32 +#define DM_SBDATA1_DATA (0xffffffffU << DM_SBDATA1_DATA_OFFSET) +#define DM_SBDATA2 0x3e +/* + * Accesses bits 95:64 of {\tt sbdata} (if the system bus is that + * wide). + */ +#define DM_SBDATA2_DATA_OFFSET 0 +#define DM_SBDATA2_DATA_LENGTH 32 +#define DM_SBDATA2_DATA (0xffffffffU << DM_SBDATA2_DATA_OFFSET) +#define DM_SBDATA3 0x3f +/* + * Accesses bits 127:96 of {\tt sbdata} (if the system bus is that + * wide). 
+ */ +#define DM_SBDATA3_DATA_OFFSET 0 +#define DM_SBDATA3_DATA_LENGTH 32 +#define DM_SBDATA3_DATA (0xffffffffU << DM_SBDATA3_DATA_OFFSET) +#define DM_CUSTOM 0x1f +#define DM_CUSTOM0 0x70 +#define DM_CUSTOM1 0x71 +#define DM_CUSTOM2 0x72 +#define DM_CUSTOM3 0x73 +#define DM_CUSTOM4 0x74 +#define DM_CUSTOM5 0x75 +#define DM_CUSTOM6 0x76 +#define DM_CUSTOM7 0x77 +#define DM_CUSTOM8 0x78 +#define DM_CUSTOM9 0x79 +#define DM_CUSTOM10 0x7a +#define DM_CUSTOM11 0x7b +#define DM_CUSTOM12 0x7c +#define DM_CUSTOM13 0x7d +#define DM_CUSTOM14 0x7e +#define DM_CUSTOM15 0x7f +#define SHORTNAME 0x123 +/* + * Description of what this field is used for. + */ +#define SHORTNAME_FIELD_OFFSET 0 +#define SHORTNAME_FIELD_LENGTH 8 +#define SHORTNAME_FIELD (0xffU << SHORTNAME_FIELD_OFFSET) +/* + * This is 0 to indicate Access Register Command. + */ +#define AC_ACCESS_REGISTER_CMDTYPE_OFFSET 24 +#define AC_ACCESS_REGISTER_CMDTYPE_LENGTH 8 +#define AC_ACCESS_REGISTER_CMDTYPE (0xffU << AC_ACCESS_REGISTER_CMDTYPE_OFFSET) +/* + * 2: Access the lowest 32 bits of the register. + * + * 3: Access the lowest 64 bits of the register. + * + * 4: Access the lowest 128 bits of the register. + * + * If \FacAccessregisterAarsize specifies a size larger than the register's actual size, + * then the access must fail. If a register is accessible, then reads of \FacAccessregisterAarsize + * less than or equal to the register's actual size must be supported. + * Writing less than the full register may be supported, but what + * happens to the high bits in that case is \unspecified. + * + * This field controls the Argument Width as referenced in + * Table~\ref{tab:datareg}. + */ +#define AC_ACCESS_REGISTER_AARSIZE_OFFSET 20 +#define AC_ACCESS_REGISTER_AARSIZE_LENGTH 3 +#define AC_ACCESS_REGISTER_AARSIZE (0x7U << AC_ACCESS_REGISTER_AARSIZE_OFFSET) +/* + * 0: No effect. This variant must be supported. + * + * 1: After a successful register access, \FacAccessregisterRegno is + * incremented. Incrementing past the highest supported value + * causes \FacAccessregisterRegno to become \unspecified. Supporting + * this variant is optional. It is undefined whether the increment + * happens when \FacAccessregisterTransfer is 0. + */ +#define AC_ACCESS_REGISTER_AARPOSTINCREMENT_OFFSET 19 +#define AC_ACCESS_REGISTER_AARPOSTINCREMENT_LENGTH 1 +#define AC_ACCESS_REGISTER_AARPOSTINCREMENT (0x1U << AC_ACCESS_REGISTER_AARPOSTINCREMENT_OFFSET) +/* + * 0: No effect. This variant must be supported, and is the only + * supported one if \FdmAbstractcsProgbufsize is 0. + * + * 1: Execute the program in the Program Buffer exactly once after + * performing the transfer, if any. Supporting this variant is + * optional. + */ +#define AC_ACCESS_REGISTER_POSTEXEC_OFFSET 18 +#define AC_ACCESS_REGISTER_POSTEXEC_LENGTH 1 +#define AC_ACCESS_REGISTER_POSTEXEC (0x1U << AC_ACCESS_REGISTER_POSTEXEC_OFFSET) +/* + * 0: Don't do the operation specified by \FacAccessregisterWrite. + * + * 1: Do the operation specified by \FacAccessregisterWrite. + * + * This bit can be used to just execute the Program Buffer without + * having to worry about placing valid values into \FacAccessregisterAarsize or \FacAccessregisterRegno. + */ +#define AC_ACCESS_REGISTER_TRANSFER_OFFSET 17 +#define AC_ACCESS_REGISTER_TRANSFER_LENGTH 1 +#define AC_ACCESS_REGISTER_TRANSFER (0x1U << AC_ACCESS_REGISTER_TRANSFER_OFFSET) +/* + * When \FacAccessregisterTransfer is set: + * 0: Copy data from the specified register into {\tt arg0} portion + * of {\tt data}. 
+ * + * 1: Copy data from {\tt arg0} portion of {\tt data} into the + * specified register. + */ +#define AC_ACCESS_REGISTER_WRITE_OFFSET 16 +#define AC_ACCESS_REGISTER_WRITE_LENGTH 1 +#define AC_ACCESS_REGISTER_WRITE (0x1U << AC_ACCESS_REGISTER_WRITE_OFFSET) +/* + * Number of the register to access, as described in + * Table~\ref{tab:regno}. + * \RcsrDpc may be used as an alias for PC if this command is + * supported on a non-halted hart. + */ +#define AC_ACCESS_REGISTER_REGNO_OFFSET 0 +#define AC_ACCESS_REGISTER_REGNO_LENGTH 16 +#define AC_ACCESS_REGISTER_REGNO (0xffffU << AC_ACCESS_REGISTER_REGNO_OFFSET) +/* + * This is 1 to indicate Quick Access command. + */ +#define AC_QUICK_ACCESS_CMDTYPE_OFFSET 24 +#define AC_QUICK_ACCESS_CMDTYPE_LENGTH 8 +#define AC_QUICK_ACCESS_CMDTYPE (0xffU << AC_QUICK_ACCESS_CMDTYPE_OFFSET) +/* + * This is 2 to indicate Access Memory Command. + */ +#define AC_ACCESS_MEMORY_CMDTYPE_OFFSET 24 +#define AC_ACCESS_MEMORY_CMDTYPE_LENGTH 8 +#define AC_ACCESS_MEMORY_CMDTYPE (0xffU << AC_ACCESS_MEMORY_CMDTYPE_OFFSET) +/* + * An implementation does not have to implement both virtual and + * physical accesses, but it must fail accesses that it doesn't + * support. + * + * 0: Addresses are physical (to the hart they are performed on). + * + * 1: Addresses are virtual, and translated the way they would be from + * M-mode, with \FcsrMstatusMprv set. + * + * Debug Modules on systems without address translation (i.e. virtual addresses equal physical) + * may optionally allow \FacAccessmemoryAamvirtual set to 1, which would produce the same result as + * that same abstract command with \FacAccessmemoryAamvirtual cleared. + */ +#define AC_ACCESS_MEMORY_AAMVIRTUAL_OFFSET 23 +#define AC_ACCESS_MEMORY_AAMVIRTUAL_LENGTH 1 +#define AC_ACCESS_MEMORY_AAMVIRTUAL (0x1U << AC_ACCESS_MEMORY_AAMVIRTUAL_OFFSET) +/* + * 0: Access the lowest 8 bits of the memory location. + * + * 1: Access the lowest 16 bits of the memory location. + * + * 2: Access the lowest 32 bits of the memory location. + * + * 3: Access the lowest 64 bits of the memory location. + * + * 4: Access the lowest 128 bits of the memory location. + */ +#define AC_ACCESS_MEMORY_AAMSIZE_OFFSET 20 +#define AC_ACCESS_MEMORY_AAMSIZE_LENGTH 3 +#define AC_ACCESS_MEMORY_AAMSIZE (0x7U << AC_ACCESS_MEMORY_AAMSIZE_OFFSET) +/* + * After a memory access has completed, if this bit is 1, increment + * {\tt arg1} (which contains the address used) by the number of bytes + * encoded in \FacAccessmemoryAamsize. + * + * Supporting this variant is optional, but highly recommended for + * performance reasons. + */ +#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT_OFFSET 19 +#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT_LENGTH 1 +#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT (0x1U << AC_ACCESS_MEMORY_AAMPOSTINCREMENT_OFFSET) +/* + * 0: Copy data from the memory location specified in {\tt arg1} into + * the low bits of {\tt arg0}. Any remaining bits of {\tt arg0} now + * have an undefined value. + * + * 1: Copy data from the low bits of {\tt arg0} into the memory + * location specified in {\tt arg1}. + */ +#define AC_ACCESS_MEMORY_WRITE_OFFSET 16 +#define AC_ACCESS_MEMORY_WRITE_LENGTH 1 +#define AC_ACCESS_MEMORY_WRITE (0x1U << AC_ACCESS_MEMORY_WRITE_OFFSET) +/* + * These bits are reserved for target-specific uses. 
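+ *
+ * For example (illustrative only), a single 32-bit physical write
+ * would use cmdtype=2, aamvirtual=0, aamsize=2, write=1, with the
+ * target address placed in arg1 and the data in arg0 beforehand.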
+ */ +#define AC_ACCESS_MEMORY_TARGET_SPECIFIC_OFFSET 14 +#define AC_ACCESS_MEMORY_TARGET_SPECIFIC_LENGTH 2 +#define AC_ACCESS_MEMORY_TARGET_SPECIFIC (0x3U << AC_ACCESS_MEMORY_TARGET_SPECIFIC_OFFSET) +#define VIRT_PRIV virtual +/* + * Contains the virtualization mode the hart was operating in when Debug + * Mode was entered. The encoding is described in Table \ref{tab:privmode}, + * and matches the virtualization mode encoding from the Privileged Spec. + * A user can write this value to change the hart's virtualization mode + * when exiting Debug Mode. + */ +#define VIRT_PRIV_V_OFFSET 2 +#define VIRT_PRIV_V_LENGTH 1 +#define VIRT_PRIV_V (0x1U << VIRT_PRIV_V_OFFSET) +/* + * Contains the privilege mode the hart was operating in when Debug + * Mode was entered. The encoding is described in Table + * \ref{tab:privmode}, and matches the privilege mode encoding from + * the Privileged Spec. A user can write this + * value to change the hart's privilege mode when exiting Debug Mode. + */ +#define VIRT_PRIV_PRV_OFFSET 0 +#define VIRT_PRIV_PRV_LENGTH 2 +#define VIRT_PRIV_PRV (0x3U << VIRT_PRIV_PRV_OFFSET) +#define DMI_SERCS 0x34 +/* + * Number of supported serial ports. + */ +#define DMI_SERCS_SERIALCOUNT_OFFSET 28 +#define DMI_SERCS_SERIALCOUNT_LENGTH 4 +#define DMI_SERCS_SERIALCOUNT (0xfU << DMI_SERCS_SERIALCOUNT_OFFSET) +/* + * Select which serial port is accessed by \RdmiSerrx and \RdmiSertx. + */ +#define DMI_SERCS_SERIAL_OFFSET 24 +#define DMI_SERCS_SERIAL_LENGTH 3 +#define DMI_SERCS_SERIAL (0x7U << DMI_SERCS_SERIAL_OFFSET) +#define DMI_SERCS_ERROR7_OFFSET 23 +#define DMI_SERCS_ERROR7_LENGTH 1 +#define DMI_SERCS_ERROR7 (0x1U << DMI_SERCS_ERROR7_OFFSET) +#define DMI_SERCS_VALID7_OFFSET 22 +#define DMI_SERCS_VALID7_LENGTH 1 +#define DMI_SERCS_VALID7 (0x1U << DMI_SERCS_VALID7_OFFSET) +#define DMI_SERCS_FULL7_OFFSET 21 +#define DMI_SERCS_FULL7_LENGTH 1 +#define DMI_SERCS_FULL7 (0x1U << DMI_SERCS_FULL7_OFFSET) +#define DMI_SERCS_ERROR6_OFFSET 20 +#define DMI_SERCS_ERROR6_LENGTH 1 +#define DMI_SERCS_ERROR6 (0x1U << DMI_SERCS_ERROR6_OFFSET) +#define DMI_SERCS_VALID6_OFFSET 19 +#define DMI_SERCS_VALID6_LENGTH 1 +#define DMI_SERCS_VALID6 (0x1U << DMI_SERCS_VALID6_OFFSET) +#define DMI_SERCS_FULL6_OFFSET 18 +#define DMI_SERCS_FULL6_LENGTH 1 +#define DMI_SERCS_FULL6 (0x1U << DMI_SERCS_FULL6_OFFSET) +#define DMI_SERCS_ERROR5_OFFSET 17 +#define DMI_SERCS_ERROR5_LENGTH 1 +#define DMI_SERCS_ERROR5 (0x1U << DMI_SERCS_ERROR5_OFFSET) +#define DMI_SERCS_VALID5_OFFSET 16 +#define DMI_SERCS_VALID5_LENGTH 1 +#define DMI_SERCS_VALID5 (0x1U << DMI_SERCS_VALID5_OFFSET) +#define DMI_SERCS_FULL5_OFFSET 15 +#define DMI_SERCS_FULL5_LENGTH 1 +#define DMI_SERCS_FULL5 (0x1U << DMI_SERCS_FULL5_OFFSET) +#define DMI_SERCS_ERROR4_OFFSET 14 +#define DMI_SERCS_ERROR4_LENGTH 1 +#define DMI_SERCS_ERROR4 (0x1U << DMI_SERCS_ERROR4_OFFSET) +#define DMI_SERCS_VALID4_OFFSET 13 +#define DMI_SERCS_VALID4_LENGTH 1 +#define DMI_SERCS_VALID4 (0x1U << DMI_SERCS_VALID4_OFFSET) +#define DMI_SERCS_FULL4_OFFSET 12 +#define DMI_SERCS_FULL4_LENGTH 1 +#define DMI_SERCS_FULL4 (0x1U << DMI_SERCS_FULL4_OFFSET) +#define DMI_SERCS_ERROR3_OFFSET 11 +#define DMI_SERCS_ERROR3_LENGTH 1 +#define DMI_SERCS_ERROR3 (0x1U << DMI_SERCS_ERROR3_OFFSET) +#define DMI_SERCS_VALID3_OFFSET 10 +#define DMI_SERCS_VALID3_LENGTH 1 +#define DMI_SERCS_VALID3 (0x1U << DMI_SERCS_VALID3_OFFSET) +#define DMI_SERCS_FULL3_OFFSET 9 +#define DMI_SERCS_FULL3_LENGTH 1 +#define DMI_SERCS_FULL3 (0x1U << DMI_SERCS_FULL3_OFFSET) +#define DMI_SERCS_ERROR2_OFFSET 8 +#define 
DMI_SERCS_ERROR2_LENGTH 1
+#define DMI_SERCS_ERROR2 (0x1U << DMI_SERCS_ERROR2_OFFSET)
+#define DMI_SERCS_VALID2_OFFSET 7
+#define DMI_SERCS_VALID2_LENGTH 1
+#define DMI_SERCS_VALID2 (0x1U << DMI_SERCS_VALID2_OFFSET)
+#define DMI_SERCS_FULL2_OFFSET 6
+#define DMI_SERCS_FULL2_LENGTH 1
+#define DMI_SERCS_FULL2 (0x1U << DMI_SERCS_FULL2_OFFSET)
+#define DMI_SERCS_ERROR1_OFFSET 5
+#define DMI_SERCS_ERROR1_LENGTH 1
+#define DMI_SERCS_ERROR1 (0x1U << DMI_SERCS_ERROR1_OFFSET)
+#define DMI_SERCS_VALID1_OFFSET 4
+#define DMI_SERCS_VALID1_LENGTH 1
+#define DMI_SERCS_VALID1 (0x1U << DMI_SERCS_VALID1_OFFSET)
+#define DMI_SERCS_FULL1_OFFSET 3
+#define DMI_SERCS_FULL1_LENGTH 1
+#define DMI_SERCS_FULL1 (0x1U << DMI_SERCS_FULL1_OFFSET)
+/*
+ * 1 when the debugger-to-core queue for serial port 0 has
+ * over- or underflowed. This bit will remain set until it is reset by
+ * writing 1 to this bit.
+ */
+#define DMI_SERCS_ERROR0_OFFSET 2
+#define DMI_SERCS_ERROR0_LENGTH 1
+#define DMI_SERCS_ERROR0 (0x1U << DMI_SERCS_ERROR0_OFFSET)
+/*
+ * 1 when the core-to-debugger queue for serial port 0 is not empty.
+ */
+#define DMI_SERCS_VALID0_OFFSET 1
+#define DMI_SERCS_VALID0_LENGTH 1
+#define DMI_SERCS_VALID0 (0x1U << DMI_SERCS_VALID0_OFFSET)
+/*
+ * 1 when the debugger-to-core queue for serial port 0 is full.
+ */
+#define DMI_SERCS_FULL0_OFFSET 0
+#define DMI_SERCS_FULL0_LENGTH 1
+#define DMI_SERCS_FULL0 (0x1U << DMI_SERCS_FULL0_OFFSET)
+#define DMI_SERTX 0x35
+#define DMI_SERTX_DATA_OFFSET 0
+#define DMI_SERTX_DATA_LENGTH 32
+#define DMI_SERTX_DATA (0xffffffffU << DMI_SERTX_DATA_OFFSET)
+#define DMI_SERRX 0x36
+#define DMI_SERRX_DATA_OFFSET 0
+#define DMI_SERRX_DATA_LENGTH 32
+#define DMI_SERRX_DATA (0xffffffffU << DMI_SERRX_DATA_OFFSET)
diff --git a/vendor/riscv-isa-sim/riscv/debug_module.cc b/vendor/riscv-isa-sim/riscv/debug_module.cc
new file mode 100644
index 00000000..0eac8424
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/debug_module.cc
@@ -0,0 +1,938 @@
+#include <cassert>
+
+#include "sim.h"
+#include "debug_module.h"
+#include "debug_defines.h"
+#include "opcodes.h"
+#include "mmu.h"
+
+#include "debug_rom/debug_rom.h"
+#include "debug_rom_defines.h"
+
+#if 0
+# define D(x) x
+#else
+# define D(x)
+#endif
+
+// Return how many bits wide a field must be to encode up to n different
+// values, e.g. 1->0, 2->1, 3->2, 4->2.
+static unsigned field_width(unsigned n)
+{
+  unsigned i = 0;
+  n -= 1;
+  while (n) {
+    i++;
+    n >>= 1;
+  }
+  return i;
+}
+
+///////////////////////// debug_module_t
+
+debug_module_t::debug_module_t(sim_t *sim, const debug_module_config_t &config) :
+  nprocs(sim->nprocs()),
+  config(config),
+  program_buffer_bytes((config.support_impebreak ? 4 : 0) + 4*config.progbufsize),
+  debug_progbuf_start(debug_data_start - program_buffer_bytes),
+  debug_abstract_start(debug_progbuf_start - debug_abstract_size*4),
+  custom_base(0),
+  hartsellen(field_width(sim->nprocs())),
+  sim(sim),
+  // The spec lets a debugger select nonexistent harts. Create hart_state for
+  // them because I'm too lazy to add the code to just ignore accesses.
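+  // (For example, with 6 harts field_width(6) == 3, so hart_state gets 8
+  // entries and hartsel values 6 and 7 simply hit a dummy entry.)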
+ hart_state(1 << field_width(sim->nprocs())), + hart_array_mask(sim->nprocs()), + rti_remaining(0) +{ + D(fprintf(stderr, "debug_data_start=0x%x\n", debug_data_start)); + D(fprintf(stderr, "debug_progbuf_start=0x%x\n", debug_progbuf_start)); + D(fprintf(stderr, "debug_abstract_start=0x%x\n", debug_abstract_start)); + + assert(nprocs <= 1024); + + program_buffer = new uint8_t[program_buffer_bytes]; + + memset(debug_rom_flags, 0, sizeof(debug_rom_flags)); + memset(program_buffer, 0, program_buffer_bytes); + memset(dmdata, 0, sizeof(dmdata)); + + if (config.support_impebreak) { + program_buffer[4*config.progbufsize] = ebreak(); + program_buffer[4*config.progbufsize+1] = ebreak() >> 8; + program_buffer[4*config.progbufsize+2] = ebreak() >> 16; + program_buffer[4*config.progbufsize+3] = ebreak() >> 24; + } + + write32(debug_rom_whereto, 0, + jal(ZERO, debug_abstract_start - DEBUG_ROM_WHERETO)); + + memset(debug_abstract, 0, sizeof(debug_abstract)); + + reset(); +} + +debug_module_t::~debug_module_t() +{ + delete[] program_buffer; +} + +void debug_module_t::reset() +{ + assert(sim->nprocs() > 0); + for (unsigned i = 0; i < sim->nprocs(); i++) { + processor_t *proc = sim->get_core(i); + if (proc) + proc->halt_request = proc->HR_NONE; + } + + memset(&dmcontrol, 0, sizeof(dmcontrol)); + + memset(&dmstatus, 0, sizeof(dmstatus)); + dmstatus.impebreak = config.support_impebreak; + dmstatus.authenticated = !config.require_authentication; + dmstatus.version = 2; + + memset(&abstractcs, 0, sizeof(abstractcs)); + abstractcs.datacount = sizeof(dmdata) / 4; + abstractcs.progbufsize = config.progbufsize; + + memset(&abstractauto, 0, sizeof(abstractauto)); + + memset(&sbcs, 0, sizeof(sbcs)); + if (config.max_sba_data_width > 0) { + sbcs.version = 1; + sbcs.asize = sizeof(reg_t) * 8; + } + if (config.max_sba_data_width >= 64) + sbcs.access64 = true; + if (config.max_sba_data_width >= 32) + sbcs.access32 = true; + if (config.max_sba_data_width >= 16) + sbcs.access16 = true; + if (config.max_sba_data_width >= 8) + sbcs.access8 = true; + + challenge = random(); +} + +void debug_module_t::add_device(bus_t *bus) { + bus->add_device(DEBUG_START, this); +} + +bool debug_module_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + addr = DEBUG_START + addr; + + if (addr >= DEBUG_ROM_ENTRY && + (addr + len) <= (DEBUG_ROM_ENTRY + debug_rom_raw_len)) { + memcpy(bytes, debug_rom_raw + addr - DEBUG_ROM_ENTRY, len); + return true; + } + + if (addr >= DEBUG_ROM_WHERETO && (addr + len) <= (DEBUG_ROM_WHERETO + 4)) { + memcpy(bytes, debug_rom_whereto + addr - DEBUG_ROM_WHERETO, len); + return true; + } + + if (addr >= DEBUG_ROM_FLAGS && ((addr + len) <= DEBUG_ROM_FLAGS + 1024)) { + memcpy(bytes, debug_rom_flags + addr - DEBUG_ROM_FLAGS, len); + return true; + } + + if (addr >= debug_abstract_start && ((addr + len) <= (debug_abstract_start + sizeof(debug_abstract)))) { + memcpy(bytes, debug_abstract + addr - debug_abstract_start, len); + return true; + } + + if (addr >= debug_data_start && (addr + len) <= (debug_data_start + sizeof(dmdata))) { + memcpy(bytes, dmdata + addr - debug_data_start, len); + return true; + } + + if (addr >= debug_progbuf_start && ((addr + len) <= (debug_progbuf_start + program_buffer_bytes))) { + memcpy(bytes, program_buffer + addr - debug_progbuf_start, len); + return true; + } + + D(fprintf(stderr, "ERROR: invalid load from debug module: %zd bytes at 0x%016" + PRIx64 "\n", len, addr)); + + return false; +} + +bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + D( + switch 
(len) { + case 4: + fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=0x%08x); " + "hartsel=0x%x\n", addr, (unsigned) len, *(uint32_t *) bytes, + dmcontrol.hartsel); + break; + default: + fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=...); " + "hartsel=0x%x\n", addr, (unsigned) len, dmcontrol.hartsel); + break; + } + ); + + uint8_t id_bytes[4]; + uint32_t id = 0; + if (len == 4) { + memcpy(id_bytes, bytes, 4); + id = read32(id_bytes, 0); + } + + addr = DEBUG_START + addr; + + if (addr >= debug_data_start && (addr + len) <= (debug_data_start + sizeof(dmdata))) { + memcpy(dmdata + addr - debug_data_start, bytes, len); + return true; + } + + if (addr >= debug_progbuf_start && ((addr + len) <= (debug_progbuf_start + program_buffer_bytes))) { + memcpy(program_buffer + addr - debug_progbuf_start, bytes, len); + + return true; + } + + if (addr == DEBUG_ROM_HALTED) { + assert (len == 4); + if (!hart_state[id].halted) { + hart_state[id].halted = true; + if (hart_state[id].haltgroup) { + for (unsigned i = 0; i < nprocs; i++) { + if (!hart_state[i].halted && + hart_state[i].haltgroup == hart_state[id].haltgroup) { + processor_t *proc = sim->get_core(i); + proc->halt_request = proc->HR_GROUP; + // TODO: What if the debugger comes and writes dmcontrol before the + // halt occurs? + } + } + } + } + if (dmcontrol.hartsel == id) { + if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))){ + if (dmcontrol.hartsel == id) { + abstract_command_completed = true; + } + } + } + return true; + } + + if (addr == DEBUG_ROM_GOING) { + assert(len == 4); + debug_rom_flags[id] &= ~(1 << DEBUG_ROM_FLAG_GO); + return true; + } + + if (addr == DEBUG_ROM_RESUMING) { + assert (len == 4); + hart_state[id].halted = false; + hart_state[id].resumeack = true; + debug_rom_flags[id] &= ~(1 << DEBUG_ROM_FLAG_RESUME); + return true; + } + + if (addr == DEBUG_ROM_EXCEPTION) { + if (abstractcs.cmderr == CMDERR_NONE) { + abstractcs.cmderr = CMDERR_EXCEPTION; + } + return true; + } + + D(fprintf(stderr, "ERROR: invalid store to debug module: %zd bytes at 0x%016" + PRIx64 "\n", len, addr)); + return false; +} + +void debug_module_t::write32(uint8_t *memory, unsigned int index, uint32_t value) +{ + uint8_t* base = memory + index * 4; + base[0] = value & 0xff; + base[1] = (value >> 8) & 0xff; + base[2] = (value >> 16) & 0xff; + base[3] = (value >> 24) & 0xff; +} + +uint32_t debug_module_t::read32(uint8_t *memory, unsigned int index) +{ + uint8_t* base = memory + index * 4; + uint32_t value = ((uint32_t) base[0]) | + (((uint32_t) base[1]) << 8) | + (((uint32_t) base[2]) << 16) | + (((uint32_t) base[3]) << 24); + return value; +} + +processor_t *debug_module_t::processor(unsigned hartid) const +{ + processor_t *proc = NULL; + try { + proc = sim->get_core(hartid); + } catch (const std::out_of_range&) { + } + return proc; +} + +bool debug_module_t::hart_selected(unsigned hartid) const +{ + if (dmcontrol.hasel) { + return hartid == dmcontrol.hartsel || hart_array_mask[hartid]; + } else { + return hartid == dmcontrol.hartsel; + } +} + +unsigned debug_module_t::sb_access_bits() +{ + return 8 << sbcs.sbaccess; +} + +void debug_module_t::sb_autoincrement() +{ + if (!sbcs.autoincrement || !config.max_sba_data_width) + return; + + uint64_t value = sbaddress[0] + sb_access_bits() / 8; + sbaddress[0] = value; + uint32_t carry = value >> 32; + + value = sbaddress[1] + carry; + sbaddress[1] = value; + carry = value >> 32; + + value = sbaddress[2] + carry; + sbaddress[2] = value; + carry = value >> 32; + + sbaddress[3] += carry; +} + +void 
debug_module_t::sb_read() +{ + reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0]; + try { + if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) { + sbdata[0] = sim->debug_mmu->load_uint8(address); + } else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) { + sbdata[0] = sim->debug_mmu->load_uint16(address); + } else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) { + sbdata[0] = sim->debug_mmu->load_uint32(address); + } else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) { + uint64_t value = sim->debug_mmu->load_uint64(address); + sbdata[0] = value; + sbdata[1] = value >> 32; + } else { + sbcs.error = 3; + } + } catch (trap_load_access_fault& t) { + sbcs.error = 2; + } +} + +void debug_module_t::sb_write() +{ + reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0]; + D(fprintf(stderr, "sb_write() 0x%x @ 0x%lx\n", sbdata[0], address)); + if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) { + sim->debug_mmu->store_uint8(address, sbdata[0]); + } else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) { + sim->debug_mmu->store_uint16(address, sbdata[0]); + } else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) { + sim->debug_mmu->store_uint32(address, sbdata[0]); + } else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) { + sim->debug_mmu->store_uint64(address, + (((uint64_t) sbdata[1]) << 32) | sbdata[0]); + } else { + sbcs.error = 3; + } +} + +bool debug_module_t::dmi_read(unsigned address, uint32_t *value) +{ + uint32_t result = 0; + D(fprintf(stderr, "dmi_read(0x%x) -> ", address)); + if (address >= DM_DATA0 && address < DM_DATA0 + abstractcs.datacount) { + unsigned i = address - DM_DATA0; + result = read32(dmdata, i); + if (abstractcs.busy) { + result = -1; + D(fprintf(stderr, "\ndmi_read(0x%02x (data[%d]) -> -1 because abstractcs.busy==true\n", address, i)); + } + + if (abstractcs.busy && abstractcs.cmderr == CMDERR_NONE) { + abstractcs.cmderr = CMDERR_BUSY; + } + + if (!abstractcs.busy && ((abstractauto.autoexecdata >> i) & 1)) { + perform_abstract_command(); + } + } else if (address >= DM_PROGBUF0 && address < DM_PROGBUF0 + config.progbufsize) { + unsigned i = address - DM_PROGBUF0; + result = read32(program_buffer, i); + if (abstractcs.busy) { + result = -1; + D(fprintf(stderr, "\ndmi_read(0x%02x (progbuf[%d]) -> -1 because abstractcs.busy==true\n", address, i)); + } + if (!abstractcs.busy && ((abstractauto.autoexecprogbuf >> i) & 1)) { + perform_abstract_command(); + } + + } else { + switch (address) { + case DM_DMCONTROL: + { + result = set_field(result, DM_DMCONTROL_HALTREQ, dmcontrol.haltreq); + result = set_field(result, DM_DMCONTROL_RESUMEREQ, dmcontrol.resumereq); + result = set_field(result, DM_DMCONTROL_HARTSELHI, + dmcontrol.hartsel >> DM_DMCONTROL_HARTSELLO_LENGTH); + result = set_field(result, DM_DMCONTROL_HASEL, dmcontrol.hasel); + result = set_field(result, DM_DMCONTROL_HARTSELLO, dmcontrol.hartsel); + result = set_field(result, DM_DMCONTROL_HARTRESET, dmcontrol.hartreset); + result = set_field(result, DM_DMCONTROL_NDMRESET, dmcontrol.ndmreset); + result = set_field(result, DM_DMCONTROL_DMACTIVE, dmcontrol.dmactive); + } + break; + case DM_DMSTATUS: + { + dmstatus.allhalted = true; + dmstatus.anyhalted = false; + dmstatus.allrunning = true; + dmstatus.anyrunning = false; + dmstatus.allnonexistant = true; + dmstatus.allresumeack = true; + dmstatus.anyresumeack = false; + for (unsigned i = 0; i < nprocs; i++) { + if (hart_selected(i)) { + dmstatus.allnonexistant = false; + if 
(hart_state[i].resumeack) {
+            dmstatus.anyresumeack = true;
+          } else {
+            dmstatus.allresumeack = false;
+          }
+          if (hart_state[i].halted) {
+            dmstatus.allrunning = false;
+            dmstatus.anyhalted = true;
+          } else {
+            dmstatus.allhalted = false;
+            dmstatus.anyrunning = true;
+          }
+        }
+      }
+
+      // We don't allow selecting non-existent harts through
+      // hart_array_mask, so the only way it's possible is by writing a
+      // non-existent hartsel.
+      dmstatus.anynonexistant = (dmcontrol.hartsel >= nprocs);
+
+      dmstatus.allunavail = false;
+      dmstatus.anyunavail = false;
+
+      result = set_field(result, DM_DMSTATUS_IMPEBREAK,
+          dmstatus.impebreak);
+      result = set_field(result, DM_DMSTATUS_ALLHAVERESET,
+          hart_state[dmcontrol.hartsel].havereset);
+      result = set_field(result, DM_DMSTATUS_ANYHAVERESET,
+          hart_state[dmcontrol.hartsel].havereset);
+      result = set_field(result, DM_DMSTATUS_ALLNONEXISTENT, dmstatus.allnonexistant);
+      result = set_field(result, DM_DMSTATUS_ALLUNAVAIL, dmstatus.allunavail);
+      result = set_field(result, DM_DMSTATUS_ALLRUNNING, dmstatus.allrunning);
+      result = set_field(result, DM_DMSTATUS_ALLHALTED, dmstatus.allhalted);
+      result = set_field(result, DM_DMSTATUS_ALLRESUMEACK, dmstatus.allresumeack);
+      result = set_field(result, DM_DMSTATUS_ANYNONEXISTENT, dmstatus.anynonexistant);
+      result = set_field(result, DM_DMSTATUS_ANYUNAVAIL, dmstatus.anyunavail);
+      result = set_field(result, DM_DMSTATUS_ANYRUNNING, dmstatus.anyrunning);
+      result = set_field(result, DM_DMSTATUS_ANYHALTED, dmstatus.anyhalted);
+      result = set_field(result, DM_DMSTATUS_ANYRESUMEACK, dmstatus.anyresumeack);
+      result = set_field(result, DM_DMSTATUS_AUTHENTICATED, dmstatus.authenticated);
+      result = set_field(result, DM_DMSTATUS_AUTHBUSY, dmstatus.authbusy);
+      result = set_field(result, DM_DMSTATUS_VERSION, dmstatus.version);
+      }
+      break;
+    case DM_ABSTRACTCS:
+      result = set_field(result, DM_ABSTRACTCS_CMDERR, abstractcs.cmderr);
+      result = set_field(result, DM_ABSTRACTCS_BUSY, abstractcs.busy);
+      result = set_field(result, DM_ABSTRACTCS_DATACOUNT, abstractcs.datacount);
+      result = set_field(result, DM_ABSTRACTCS_PROGBUFSIZE,
+          abstractcs.progbufsize);
+      break;
+    case DM_ABSTRACTAUTO:
+      result = set_field(result, DM_ABSTRACTAUTO_AUTOEXECPROGBUF, abstractauto.autoexecprogbuf);
+      result = set_field(result, DM_ABSTRACTAUTO_AUTOEXECDATA, abstractauto.autoexecdata);
+      break;
+    case DM_COMMAND:
+      result = 0;
+      break;
+    case DM_HARTINFO:
+      result = set_field(result, DM_HARTINFO_NSCRATCH, 1);
+      result = set_field(result, DM_HARTINFO_DATAACCESS, 1);
+      result = set_field(result, DM_HARTINFO_DATASIZE, abstractcs.datacount);
+      result = set_field(result, DM_HARTINFO_DATAADDR, debug_data_start);
+      break;
+    case DM_HAWINDOWSEL:
+      result = hawindowsel;
+      break;
+    case DM_HAWINDOW:
+      {
+        unsigned base = hawindowsel * 32;
+        for (unsigned i = 0; i < 32; i++) {
+          unsigned n = base + i;
+          if (n < nprocs && hart_array_mask[n]) {
+            result |= 1 << i;
+          }
+        }
+      }
+      break;
+    case DM_SBCS:
+      result = set_field(result, DM_SBCS_SBVERSION, sbcs.version);
+      result = set_field(result, DM_SBCS_SBREADONADDR, sbcs.readonaddr);
+      result = set_field(result, DM_SBCS_SBACCESS, sbcs.sbaccess);
+      result = set_field(result, DM_SBCS_SBAUTOINCREMENT, sbcs.autoincrement);
+      result = set_field(result, DM_SBCS_SBREADONDATA, sbcs.readondata);
+      result = set_field(result, DM_SBCS_SBERROR, sbcs.error);
+      result = set_field(result, DM_SBCS_SBASIZE, sbcs.asize);
+      result = set_field(result, DM_SBCS_SBACCESS128, sbcs.access128);
+      result = set_field(result,
DM_SBCS_SBACCESS64, sbcs.access64); + result = set_field(result, DM_SBCS_SBACCESS32, sbcs.access32); + result = set_field(result, DM_SBCS_SBACCESS16, sbcs.access16); + result = set_field(result, DM_SBCS_SBACCESS8, sbcs.access8); + break; + case DM_SBADDRESS0: + result = sbaddress[0]; + break; + case DM_SBADDRESS1: + result = sbaddress[1]; + break; + case DM_SBADDRESS2: + result = sbaddress[2]; + break; + case DM_SBADDRESS3: + result = sbaddress[3]; + break; + case DM_SBDATA0: + result = sbdata[0]; + if (sbcs.error == 0) { + if (sbcs.readondata) { + sb_read(); + } + if (sbcs.error == 0) { + sb_autoincrement(); + } + } + break; + case DM_SBDATA1: + result = sbdata[1]; + break; + case DM_SBDATA2: + result = sbdata[2]; + break; + case DM_SBDATA3: + result = sbdata[3]; + break; + case DM_AUTHDATA: + result = challenge; + break; + case DM_DMCS2: + result = set_field(result, DM_DMCS2_GROUP, + hart_state[dmcontrol.hartsel].haltgroup); + break; + default: + result = 0; + D(fprintf(stderr, "Unexpected. Returning Error.")); + return false; + } + } + D(fprintf(stderr, "0x%x\n", result)); + *value = result; + return true; +} + +void debug_module_t::run_test_idle() +{ + if (rti_remaining > 0) { + rti_remaining--; + } + if (rti_remaining == 0 && abstractcs.busy && abstract_command_completed) { + abstractcs.busy = false; + } +} + +static bool is_fpu_reg(unsigned regno) +{ + return (regno >= 0x1020 && regno <= 0x103f) || regno == CSR_FFLAGS || + regno == CSR_FRM || regno == CSR_FCSR; +} + +bool debug_module_t::perform_abstract_command() +{ + if (abstractcs.cmderr != CMDERR_NONE) + return true; + if (abstractcs.busy) { + abstractcs.cmderr = CMDERR_BUSY; + return true; + } + + if ((command >> 24) == 0) { + // register access + unsigned size = get_field(command, AC_ACCESS_REGISTER_AARSIZE); + bool write = get_field(command, AC_ACCESS_REGISTER_WRITE); + unsigned regno = get_field(command, AC_ACCESS_REGISTER_REGNO); + + if (!hart_state[dmcontrol.hartsel].halted) { + abstractcs.cmderr = CMDERR_HALTRESUME; + return true; + } + + unsigned i = 0; + if (get_field(command, AC_ACCESS_REGISTER_TRANSFER)) { + + if (is_fpu_reg(regno)) { + // Save S0 + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0)); + // Save mstatus + write32(debug_abstract, i++, csrr(S0, CSR_MSTATUS)); + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH1)); + // Set mstatus.fs + assert((MSTATUS_FS & 0xfff) == 0); + write32(debug_abstract, i++, lui(S0, MSTATUS_FS >> 12)); + write32(debug_abstract, i++, csrrs(ZERO, S0, CSR_MSTATUS)); + } + + if (regno < 0x1000 && config.support_abstract_csr_access) { + if (!is_fpu_reg(regno)) { + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0)); + } + + if (write) { + switch (size) { + case 2: + write32(debug_abstract, i++, lw(S0, ZERO, debug_data_start)); + break; + case 3: + write32(debug_abstract, i++, ld(S0, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + write32(debug_abstract, i++, csrw(S0, regno)); + + } else { + write32(debug_abstract, i++, csrr(S0, regno)); + switch (size) { + case 2: + write32(debug_abstract, i++, sw(S0, ZERO, debug_data_start)); + break; + case 3: + write32(debug_abstract, i++, sd(S0, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + } + if (!is_fpu_reg(regno)) { + write32(debug_abstract, i++, csrr(S0, CSR_DSCRATCH0)); + } + + } else if (regno >= 0x1000 && regno < 0x1020) { + unsigned regnum = regno - 0x1000; + + switch (size) { + case 2: + if (write) + 
write32(debug_abstract, i++, lw(regnum, ZERO, debug_data_start));
+            else
+              write32(debug_abstract, i++, sw(regnum, ZERO, debug_data_start));
+            break;
+          case 3:
+            if (write)
+              write32(debug_abstract, i++, ld(regnum, ZERO, debug_data_start));
+            else
+              write32(debug_abstract, i++, sd(regnum, ZERO, debug_data_start));
+            break;
+          default:
+            abstractcs.cmderr = CMDERR_NOTSUP;
+            return true;
+        }
+
+        if (regno == 0x1000 + S0 && write) {
+          /*
+           * The exception handler starts out by restoring dscratch to s0,
+           * which was saved before executing the abstract memory region. Since
+           * we just wrote s0, also make sure to write that same value to
+           * dscratch in case an exception occurs in a program buffer that
+           * might be executed later.
+           */
+          write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0));
+        }
+
+      } else if (regno >= 0x1020 && regno < 0x1040) {
+        unsigned fprnum = regno - 0x1020;
+
+        if (write) {
+          switch (size) {
+            case 2:
+              write32(debug_abstract, i++, flw(fprnum, ZERO, debug_data_start));
+              break;
+            case 3:
+              write32(debug_abstract, i++, fld(fprnum, ZERO, debug_data_start));
+              break;
+            default:
+              abstractcs.cmderr = CMDERR_NOTSUP;
+              return true;
+          }
+
+        } else {
+          switch (size) {
+            case 2:
+              write32(debug_abstract, i++, fsw(fprnum, ZERO, debug_data_start));
+              break;
+            case 3:
+              write32(debug_abstract, i++, fsd(fprnum, ZERO, debug_data_start));
+              break;
+            default:
+              abstractcs.cmderr = CMDERR_NOTSUP;
+              return true;
+          }
+        }
+
+      } else if (regno >= 0xc000 && (regno & 1) == 1) {
+        // Support odd-numbered custom registers, to allow for debugger testing.
+        unsigned custom_number = regno - 0xc000;
+        abstractcs.cmderr = CMDERR_NONE;
+        if (write) {
+          // Writing V to custom register N will cause future reads of N to
+          // return V, reads of N-1 will return V-1, etc.
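+          // For example, writing 10 to 0xc001 makes custom_base 9, so a later
+          // read of 0xc003 returns 12 and a read of 0xc001 returns 10 again.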
custom_base = read32(dmdata, 0) - custom_number;
+        } else {
+          write32(dmdata, 0, custom_number + custom_base);
+          write32(dmdata, 1, 0);
+        }
+        return true;
+
+      } else {
+        abstractcs.cmderr = CMDERR_NOTSUP;
+        return true;
+      }
+
+      if (is_fpu_reg(regno)) {
+        // restore mstatus
+        write32(debug_abstract, i++, csrr(S0, CSR_DSCRATCH1));
+        write32(debug_abstract, i++, csrw(S0, CSR_MSTATUS));
+        // restore s0
+        write32(debug_abstract, i++, csrr(S0, CSR_DSCRATCH0));
+      }
+    }
+
+    if (get_field(command, AC_ACCESS_REGISTER_POSTEXEC)) {
+      write32(debug_abstract, i,
+          jal(ZERO, debug_progbuf_start - debug_abstract_start - 4 * i));
+      i++;
+    } else {
+      write32(debug_abstract, i++, ebreak());
+    }
+
+    debug_rom_flags[dmcontrol.hartsel] |= 1 << DEBUG_ROM_FLAG_GO;
+    rti_remaining = config.abstract_rti;
+    abstract_command_completed = false;
+
+    abstractcs.busy = true;
+  } else {
+    abstractcs.cmderr = CMDERR_NOTSUP;
+  }
+  return true;
+}
+
+bool debug_module_t::dmi_write(unsigned address, uint32_t value)
+{
+  D(fprintf(stderr, "dmi_write(0x%x, 0x%x)\n", address, value));
+
+  if (!dmstatus.authenticated && address != DM_AUTHDATA &&
+      address != DM_DMCONTROL)
+    return false;
+
+  if (address >= DM_DATA0 && address < DM_DATA0 + abstractcs.datacount) {
+    unsigned i = address - DM_DATA0;
+    if (!abstractcs.busy)
+      write32(dmdata, address - DM_DATA0, value);
+
+    if (abstractcs.busy && abstractcs.cmderr == CMDERR_NONE) {
+      abstractcs.cmderr = CMDERR_BUSY;
+    }
+
+    if (!abstractcs.busy && ((abstractauto.autoexecdata >> i) & 1)) {
+      perform_abstract_command();
+    }
+    return true;
+
+  } else if (address >= DM_PROGBUF0 && address < DM_PROGBUF0 + config.progbufsize) {
+    unsigned i = address - DM_PROGBUF0;
+
+    if (!abstractcs.busy)
+      write32(program_buffer, i, value);
+
+    if (!abstractcs.busy && ((abstractauto.autoexecprogbuf >> i) & 1)) {
+      perform_abstract_command();
+    }
+    return true;
+
+  } else {
+    switch (address) {
+      case DM_DMCONTROL:
+        {
+          if (!dmcontrol.dmactive && get_field(value, DM_DMCONTROL_DMACTIVE))
+            reset();
+          dmcontrol.dmactive = get_field(value, DM_DMCONTROL_DMACTIVE);
+          if (!dmstatus.authenticated || !dmcontrol.dmactive)
+            return true;
+
+          dmcontrol.haltreq = get_field(value, DM_DMCONTROL_HALTREQ);
+          dmcontrol.resumereq = get_field(value, DM_DMCONTROL_RESUMEREQ);
+          dmcontrol.hartreset = get_field(value, DM_DMCONTROL_HARTRESET);
+          dmcontrol.ndmreset = get_field(value, DM_DMCONTROL_NDMRESET);
+          if (config.support_hasel)
+            dmcontrol.hasel = get_field(value, DM_DMCONTROL_HASEL);
+          else
+            dmcontrol.hasel = 0;
+          dmcontrol.hartsel = get_field(value, DM_DMCONTROL_HARTSELHI) <<
+            DM_DMCONTROL_HARTSELLO_LENGTH;
+          dmcontrol.hartsel |= get_field(value, DM_DMCONTROL_HARTSELLO);
+          dmcontrol.hartsel &= (1L << hartsellen) - 1;
+          for (unsigned i = 0; i < nprocs; i++) {
+            if (hart_selected(i)) {
+              if (get_field(value, DM_DMCONTROL_ACKHAVERESET)) {
+                hart_state[i].havereset = false;
+              }
+              processor_t *proc = processor(i);
+              if (proc) {
+                proc->halt_request = dmcontrol.haltreq ?
proc->HR_REGULAR : proc->HR_NONE;
+                if (dmcontrol.haltreq) {
+                  D(fprintf(stderr, "halt hart %d\n", i));
+                }
+                if (dmcontrol.resumereq) {
+                  D(fprintf(stderr, "resume hart %d\n", i));
+                  debug_rom_flags[i] |= (1 << DEBUG_ROM_FLAG_RESUME);
+                  hart_state[i].resumeack = false;
+                }
+                if (dmcontrol.hartreset) {
+                  proc->reset();
+                }
+              }
+            }
+          }
+
+          if (dmcontrol.ndmreset) {
+            for (size_t i = 0; i < sim->nprocs(); i++) {
+              processor_t *proc = sim->get_core(i);
+              proc->reset();
+            }
+          }
+        }
+        return true;
+
+      case DM_COMMAND:
+        command = value;
+        return perform_abstract_command();
+
+      case DM_HAWINDOWSEL:
+        hawindowsel = value & ((1U << field_width(hart_array_mask.size())) - 1);
+        return true;
+
+      case DM_HAWINDOW:
+        {
+          unsigned base = hawindowsel * 32;
+          for (unsigned i = 0; i < 32; i++) {
+            unsigned n = base + i;
+            if (n < nprocs) {
+              hart_array_mask[n] = (value >> i) & 1;
+            }
+          }
+        }
+        return true;
+
+      case DM_ABSTRACTCS:
+        abstractcs.cmderr = (cmderr_t) (((uint32_t) (abstractcs.cmderr)) & (~(uint32_t)(get_field(value, DM_ABSTRACTCS_CMDERR))));
+        return true;
+
+      case DM_ABSTRACTAUTO:
+        abstractauto.autoexecprogbuf = get_field(value,
+            DM_ABSTRACTAUTO_AUTOEXECPROGBUF);
+        abstractauto.autoexecdata = get_field(value,
+            DM_ABSTRACTAUTO_AUTOEXECDATA);
+        return true;
+      case DM_SBCS:
+        sbcs.readonaddr = get_field(value, DM_SBCS_SBREADONADDR);
+        sbcs.sbaccess = get_field(value, DM_SBCS_SBACCESS);
+        sbcs.autoincrement = get_field(value, DM_SBCS_SBAUTOINCREMENT);
+        sbcs.readondata = get_field(value, DM_SBCS_SBREADONDATA);
+        sbcs.error &= ~get_field(value, DM_SBCS_SBERROR);
+        return true;
+      case DM_SBADDRESS0:
+        sbaddress[0] = value;
+        if (sbcs.error == 0 && sbcs.readonaddr) {
+          sb_read();
+          sb_autoincrement();
+        }
+        return true;
+      case DM_SBADDRESS1:
+        sbaddress[1] = value;
+        return true;
+      case DM_SBADDRESS2:
+        sbaddress[2] = value;
+        return true;
+      case DM_SBADDRESS3:
+        sbaddress[3] = value;
+        return true;
+      case DM_SBDATA0:
+        sbdata[0] = value;
+        if (sbcs.error == 0) {
+          sb_write();
+          if (sbcs.error == 0) {
+            sb_autoincrement();
+          }
+        }
+        return true;
+      case DM_SBDATA1:
+        sbdata[1] = value;
+        return true;
+      case DM_SBDATA2:
+        sbdata[2] = value;
+        return true;
+      case DM_SBDATA3:
+        sbdata[3] = value;
+        return true;
+      case DM_AUTHDATA:
+        D(fprintf(stderr, "debug authentication: got 0x%x; 0x%x unlocks\n", value,
+              challenge + secret));
+        if (config.require_authentication) {
+          if (value == challenge + secret) {
+            dmstatus.authenticated = true;
+          } else {
+            dmstatus.authenticated = false;
+            challenge = random();
+          }
+        }
+        return true;
+      case DM_DMCS2:
+        if (config.support_haltgroups && get_field(value, DM_DMCS2_HGWRITE)) {
+          hart_state[dmcontrol.hartsel].haltgroup = get_field(value,
+              DM_DMCS2_GROUP);
+        }
+        return true;
+    }
+  }
+  return false;
+}
+
+void debug_module_t::proc_reset(unsigned id)
+{
+  hart_state[id].havereset = true;
+  hart_state[id].halted = false;
+  hart_state[id].haltgroup = 0;
+}
diff --git a/vendor/riscv-isa-sim/riscv/debug_module.h b/vendor/riscv-isa-sim/riscv/debug_module.h
new file mode 100644
index 00000000..d79ce7d1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/debug_module.h
@@ -0,0 +1,193 @@
+// See LICENSE for license details.
+#ifndef _RISCV_DEBUG_MODULE_H
+#define _RISCV_DEBUG_MODULE_H
+
+#include <vector>
+
+#include "abstract_device.h"
+#include "mmu.h"
+
+class sim_t;
+class bus_t;
+
+typedef struct {
+  // Size of program_buffer in 32-bit words, as exposed to the rest of the
+  // world.
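+  // (The actual buffer is one word larger when support_impebreak is set,
+  // to hold the implicit ebreak; see program_buffer_bytes in debug_module.cc.)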
+  unsigned progbufsize;
+  unsigned max_sba_data_width;
+  bool require_authentication;
+  unsigned abstract_rti;
+  bool support_hasel;
+  bool support_abstract_csr_access;
+  bool support_haltgroups;
+  bool support_impebreak;
+} debug_module_config_t;
+
+typedef struct {
+  bool haltreq;
+  bool resumereq;
+  bool hasel;
+  unsigned hartsel;
+  bool hartreset;
+  bool dmactive;
+  bool ndmreset;
+} dmcontrol_t;
+
+typedef struct {
+  bool impebreak;
+  bool allhavereset;
+  bool anyhavereset;
+  bool allnonexistant;
+  bool anynonexistant;
+  bool allunavail;
+  bool anyunavail;
+  bool allrunning;
+  bool anyrunning;
+  bool allhalted;
+  bool anyhalted;
+  bool allresumeack;
+  bool anyresumeack;
+  bool authenticated;
+  bool authbusy;
+  bool cfgstrvalid;
+  unsigned version;
+} dmstatus_t;
+
+typedef enum cmderr {
+  CMDERR_NONE = 0,
+  CMDERR_BUSY = 1,
+  CMDERR_NOTSUP = 2,
+  CMDERR_EXCEPTION = 3,
+  CMDERR_HALTRESUME = 4,
+  CMDERR_OTHER = 7
+} cmderr_t;
+
+typedef struct {
+  bool busy;
+  unsigned datacount;
+  unsigned progbufsize;
+  cmderr_t cmderr;
+} abstractcs_t;
+
+typedef struct {
+  unsigned autoexecprogbuf;
+  unsigned autoexecdata;
+} abstractauto_t;
+
+typedef struct {
+  unsigned version;
+  bool readonaddr;
+  unsigned sbaccess;
+  bool autoincrement;
+  bool readondata;
+  unsigned error;
+  unsigned asize;
+  bool access128;
+  bool access64;
+  bool access32;
+  bool access16;
+  bool access8;
+} sbcs_t;
+
+typedef struct {
+  bool halted;
+  bool resumeack;
+  bool havereset;
+  uint8_t haltgroup;
+} hart_debug_state_t;
+
+class debug_module_t : public abstract_device_t
+{
+public:
+  /*
+   * If require_authentication is true, then a debugger must authenticate as
+   * follows:
+   * 1. Read a 32-bit value from authdata.
+   * 2. Write the value that was read back, plus one, to authdata.
+   *
+   * abstract_rti is extra run-test/idle cycles that each abstract command
+   * takes to execute. Useful for testing OpenOCD.
+   */
+  debug_module_t(sim_t *sim, const debug_module_config_t &config);
+  ~debug_module_t();
+
+  void add_device(bus_t *bus);
+
+  bool load(reg_t addr, size_t len, uint8_t* bytes);
+  bool store(reg_t addr, size_t len, const uint8_t* bytes);
+
+  // Debug Module Interface that the debugger (in our case through JTAG DTM)
+  // uses to access the DM.
+  // Return true for success, false for failure.
+  bool dmi_read(unsigned address, uint32_t *value);
+  bool dmi_write(unsigned address, uint32_t value);
+
+  // Called for every cycle the JTAG TAP spends in Run-Test/Idle.
+  void run_test_idle();
+
+  // Called when one of the attached harts was reset.
+  void proc_reset(unsigned id);
+
+private:
+  static const unsigned datasize = 2;
+  unsigned nprocs;
+  debug_module_config_t config;
+  // Actual size of the program buffer, which is 1 word bigger than we let on
+  // to implement the implicit ebreak at the end.
+  unsigned program_buffer_bytes;
+  static const unsigned debug_data_start = 0x380;
+  unsigned debug_progbuf_start;
+
+  static const unsigned debug_abstract_size = 12;
+  unsigned debug_abstract_start;
+  // R/W this through custom registers, to allow debuggers to test that
+  // functionality.
+  unsigned custom_base;
+
+  // We only support 1024 harts currently. More requires at least resizing
+  // the arrays below, and their corresponding special memory regions.
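+  // (10 bits of hartsel cover hart indexes 0..1023, matching the 1024-entry
+  // debug_rom_flags array and the flag region checked in load().)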
+  unsigned hartsellen = 10;
+
+  sim_t *sim;
+
+  uint8_t debug_rom_whereto[4];
+  uint8_t debug_abstract[debug_abstract_size * 4];
+  uint8_t *program_buffer;
+  uint8_t dmdata[datasize * 4];
+
+  std::vector<hart_debug_state_t> hart_state;
+  uint8_t debug_rom_flags[1024];
+
+  void write32(uint8_t *rom, unsigned int index, uint32_t value);
+  uint32_t read32(uint8_t *rom, unsigned int index);
+
+  void sb_autoincrement();
+  void sb_read();
+  void sb_write();
+  unsigned sb_access_bits();
+
+  dmcontrol_t dmcontrol;
+  dmstatus_t dmstatus;
+  abstractcs_t abstractcs;
+  abstractauto_t abstractauto;
+  uint32_t command;
+  uint16_t hawindowsel;
+  std::vector<bool> hart_array_mask;
+
+  sbcs_t sbcs;
+  uint32_t sbaddress[4];
+  uint32_t sbdata[4];
+
+  uint32_t challenge;
+  const uint32_t secret = 1;
+
+  processor_t *processor(unsigned hartid) const;
+  bool hart_selected(unsigned hartid) const;
+  void reset();
+  bool perform_abstract_command();
+
+  bool abstract_command_completed;
+  unsigned rti_remaining;
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/debug_rom_defines.h b/vendor/riscv-isa-sim/riscv/debug_rom_defines.h
new file mode 100644
index 00000000..616cf590
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/debug_rom_defines.h
@@ -0,0 +1,23 @@
+// See LICENSE file for license details.
+
+#ifndef DEBUG_ROM_DEFINES_H
+#define DEBUG_ROM_DEFINES_H
+
+// These are implementation-specific addresses in the Debug Module
+#define DEBUG_ROM_HALTED 0x100
+#define DEBUG_ROM_GOING 0x104
+#define DEBUG_ROM_RESUMING 0x108
+#define DEBUG_ROM_EXCEPTION 0x10C
+
+// Region of memory where each hart has 1
+// byte to read.
+#define DEBUG_ROM_FLAGS 0x400
+#define DEBUG_ROM_FLAG_GO 0
+#define DEBUG_ROM_FLAG_RESUME 1
+
+// These needs to match the link.ld
+#define DEBUG_ROM_WHERETO 0x300
+#define DEBUG_ROM_ENTRY 0x800
+#define DEBUG_ROM_TVEC 0x808
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/decode.h b/vendor/riscv-isa-sim/riscv/decode.h
new file mode 100644
index 00000000..611c9107
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/decode.h
@@ -0,0 +1,2996 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_DECODE_H
+#define _RISCV_DECODE_H
+
+#if (-1 != ~0) || ((-1 >> 1) != -1)
+# error spike requires a two''s-complement c++ implementation
+#endif
+
+#include <algorithm>
+#include <cstdint>
+#include <string.h>
+#include <strings.h>
+#include "encoding.h"
+#include "config.h"
+#include "common.h"
+#include "softfloat_types.h"
+#include "specialize.h"
+#include <cinttypes>
+
+typedef int64_t sreg_t;
+typedef uint64_t reg_t;
+
+#ifdef __SIZEOF_INT128__
+typedef __int128 int128_t;
+typedef unsigned __int128 uint128_t;
+#endif
+
+const int NXPR = 32;
+const int NFPR = 32;
+const int NVPR = 32;
+const int NCSR = 4096;
+
+#define X_RA 1
+#define X_SP 2
+
+#define VCSR_VXRM_SHIFT 1
+#define VCSR_VXRM (0x3 << VCSR_VXRM_SHIFT)
+
+#define VCSR_VXSAT_SHIFT 0
+#define VCSR_VXSAT (0x1 << VCSR_VXSAT_SHIFT)
+
+#define FP_RD_NE 0
+#define FP_RD_0 1
+#define FP_RD_DN 2
+#define FP_RD_UP 3
+#define FP_RD_NMM 4
+
+#define FSR_RD_SHIFT 5
+#define FSR_RD (0x7 << FSR_RD_SHIFT)
+
+#define FPEXC_NX 0x01
+#define FPEXC_UF 0x02
+#define FPEXC_OF 0x04
+#define FPEXC_DZ 0x08
+#define FPEXC_NV 0x10
+
+#define FSR_AEXC_SHIFT 0
+#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT)
+#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT)
+#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT)
+#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT)
+#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
+#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+#define insn_length(x) \
+  (((x) & 0x03) < 0x03 ? 2 : \
+   ((x) & 0x1f) < 0x1f ?
4 : \ + ((x) & 0x3f) < 0x3f ? 6 : \ + ((x) & 0x7f) == 0x7f ? 4 : \ + 8) +#define MAX_INSN_LENGTH 8 +#define PC_ALIGN 2 + +typedef uint64_t insn_bits_t; +class insn_t +{ +public: + insn_t() = default; + insn_t(insn_bits_t bits) : b(bits) {} + insn_bits_t bits() { return b & ~((UINT64_MAX) << (length() * 8)); } + int length() { return insn_length(b); } + int64_t i_imm() { return int64_t(b) >> 20; } + int64_t shamt() { return x(20, 6); } + int64_t s_imm() { return x(7, 5) + (xs(25, 7) << 5); } + int64_t sb_imm() { return (x(8, 4) << 1) + (x(25, 6) << 5) + (x(7, 1) << 11) + (imm_sign() << 12); } + int64_t u_imm() { return int64_t(b) >> 12 << 12; } + int64_t uj_imm() { return (x(21, 10) << 1) + (x(20, 1) << 11) + (x(12, 8) << 12) + (imm_sign() << 20); } + uint64_t rd() { return x(7, 5); } + uint64_t rs1() { return x(15, 5); } + uint64_t rs2() { return x(20, 5); } + uint64_t rs3() { return x(27, 5); } + uint64_t rm() { return x(12, 3); } + uint64_t csr() { return x(20, 12); } + uint64_t iorw() { return x(20, 8); } + uint64_t bs() { return x(30, 2); } // Crypto ISE - SM4/AES32 byte select. + uint64_t rcon() { return x(20, 4); } // Crypto ISE - AES64 round const. + + int64_t rvc_imm() { return x(2, 5) + (xs(12, 1) << 5); } + int64_t rvc_zimm() { return x(2, 5) + (x(12, 1) << 5); } + int64_t rvc_addi4spn_imm() { return (x(6, 1) << 2) + (x(5, 1) << 3) + (x(11, 2) << 4) + (x(7, 4) << 6); } + int64_t rvc_addi16sp_imm() { return (x(6, 1) << 4) + (x(2, 1) << 5) + (x(5, 1) << 6) + (x(3, 2) << 7) + (xs(12, 1) << 9); } + int64_t rvc_lwsp_imm() { return (x(4, 3) << 2) + (x(12, 1) << 5) + (x(2, 2) << 6); } + int64_t rvc_ldsp_imm() { return (x(5, 2) << 3) + (x(12, 1) << 5) + (x(2, 3) << 6); } + int64_t rvc_swsp_imm() { return (x(9, 4) << 2) + (x(7, 2) << 6); } + int64_t rvc_sdsp_imm() { return (x(10, 3) << 3) + (x(7, 3) << 6); } + int64_t rvc_lw_imm() { return (x(6, 1) << 2) + (x(10, 3) << 3) + (x(5, 1) << 6); } + int64_t rvc_ld_imm() { return (x(10, 3) << 3) + (x(5, 2) << 6); } + int64_t rvc_j_imm() { return (x(3, 3) << 1) + (x(11, 1) << 4) + (x(2, 1) << 5) + (x(7, 1) << 6) + (x(6, 1) << 7) + (x(9, 2) << 8) + (x(8, 1) << 10) + (xs(12, 1) << 11); } + int64_t rvc_b_imm() { return (x(3, 2) << 1) + (x(10, 2) << 3) + (x(2, 1) << 5) + (x(5, 2) << 6) + (xs(12, 1) << 8); } + int64_t rvc_simm3() { return x(10, 3); } + uint64_t rvc_rd() { return rd(); } + uint64_t rvc_rs1() { return rd(); } + uint64_t rvc_rs2() { return x(2, 5); } + uint64_t rvc_rs1s() { return 8 + x(7, 3); } + uint64_t rvc_rs2s() { return 8 + x(2, 3); } + + uint64_t v_vm() { return x(25, 1); } + uint64_t v_wd() { return x(26, 1); } + uint64_t v_nf() { return x(29, 3); } + uint64_t v_simm5() { return xs(15, 5); } + uint64_t v_zimm5() { return x(15, 5); } + uint64_t v_zimm10() { return x(20, 10); } + uint64_t v_zimm11() { return x(20, 11); } + uint64_t v_lmul() { return x(20, 2); } + uint64_t v_frac_lmul() { return x(22, 1); } + uint64_t v_sew() { return 1 << (x(23, 3) + 3); } + uint64_t v_width() { return x(12, 3); } + uint64_t v_mop() { return x(26, 2); } + uint64_t v_lumop() { return x(20, 5); } + uint64_t v_sumop() { return x(20, 5); } + uint64_t v_vta() { return x(26, 1); } + uint64_t v_vma() { return x(27, 1); } + uint64_t v_mew() { return x(28, 1); } + + uint64_t p_imm2() { return x(20, 2); } + uint64_t p_imm3() { return x(20, 3); } + uint64_t p_imm4() { return x(20, 4); } + uint64_t p_imm5() { return x(20, 5); } + uint64_t p_imm6() { return x(20, 6); } + +private: + insn_bits_t b; + uint64_t x(int lo, int len) { return (b >> lo) & 
((insn_bits_t(1) << len) - 1); } + uint64_t xs(int lo, int len) { return int64_t(b) << (64 - lo - len) >> (64 - len); } + uint64_t imm_sign() { return xs(63, 1); } +}; + +template +class regfile_t +{ +public: + void write(size_t i, T value) + { + if (!zero_reg || i != 0) + data[i] = value; + } + const T& operator [] (size_t i) const + { + return data[i]; + } + regfile_t() + { + reset(); + } + void reset() + { + memset(data, 0, sizeof(data)); + } +private: + T data[N]; +}; + +// helpful macros, etc +#define MMU (*p->get_mmu()) +#define STATE (*p->get_state()) +#define FLEN (p->get_flen()) +#define CHECK_REG(reg) ((void) 0) +#define READ_REG(reg) ({ CHECK_REG(reg); STATE.XPR[reg]; }) +#define READ_FREG(reg) STATE.FPR[reg] +#define RD READ_REG(insn.rd()) +#define RS1 READ_REG(insn.rs1()) +#define RS2 READ_REG(insn.rs2()) +#define RS3 READ_REG(insn.rs3()) +#define WRITE_RD(value) WRITE_REG(insn.rd(), value) + +#ifndef RISCV_ENABLE_COMMITLOG +# define WRITE_REG(reg, value) ({ CHECK_REG(reg); STATE.XPR.write(reg, value); }) +# define WRITE_FREG(reg, value) DO_WRITE_FREG(reg, freg(value)) +# define WRITE_VSTATUS {} +#else + /* 0 : int + * 1 : floating + * 2 : vector reg + * 3 : vector hint + * 4 : csr + */ +# define WRITE_REG(reg, value) ({ \ + reg_t wdata = (value); /* value may have side effects */ \ + STATE.log_reg_write[(reg) << 4] = {wdata, 0}; \ + CHECK_REG(reg); \ + STATE.XPR.write(reg, wdata); \ + }) +# define WRITE_FREG(reg, value) ({ \ + freg_t wdata = freg(value); /* value may have side effects */ \ + STATE.log_reg_write[((reg) << 4) | 1] = wdata; \ + DO_WRITE_FREG(reg, wdata); \ + }) +# define WRITE_VSTATUS STATE.log_reg_write[3] = {0, 0}; +#endif + +// RVC macros +#define WRITE_RVC_RS1S(value) WRITE_REG(insn.rvc_rs1s(), value) +#define WRITE_RVC_RS2S(value) WRITE_REG(insn.rvc_rs2s(), value) +#define WRITE_RVC_FRS2S(value) WRITE_FREG(insn.rvc_rs2s(), value) +#define RVC_RS1 READ_REG(insn.rvc_rs1()) +#define RVC_RS2 READ_REG(insn.rvc_rs2()) +#define RVC_RS1S READ_REG(insn.rvc_rs1s()) +#define RVC_RS2S READ_REG(insn.rvc_rs2s()) +#define RVC_FRS2 READ_FREG(insn.rvc_rs2()) +#define RVC_FRS2S READ_FREG(insn.rvc_rs2s()) +#define RVC_SP READ_REG(X_SP) + +// FPU macros +#define FRS1 READ_FREG(insn.rs1()) +#define FRS2 READ_FREG(insn.rs2()) +#define FRS3 READ_FREG(insn.rs3()) +#define dirty_fp_state STATE.sstatus->dirty(SSTATUS_FS) +#define dirty_ext_state STATE.sstatus->dirty(SSTATUS_XS) +#define dirty_vs_state STATE.sstatus->dirty(SSTATUS_VS) +#define DO_WRITE_FREG(reg, value) (STATE.FPR.write(reg, value), dirty_fp_state) +#define WRITE_FRD(value) WRITE_FREG(insn.rd(), value) + +#define SHAMT (insn.i_imm() & 0x3F) +#define BRANCH_TARGET (pc + insn.sb_imm()) +#define JUMP_TARGET (pc + insn.uj_imm()) +#define RM ({ int rm = insn.rm(); \ + if (rm == 7) rm = STATE.frm->read(); \ + if (rm > 4) throw trap_illegal_instruction(insn.bits()); \ + rm; }) + +#define get_field(reg, mask) (((reg) & (decltype(reg))(mask)) / ((mask) & ~((mask) << 1))) +#define set_field(reg, mask, val) (((reg) & ~(decltype(reg))(mask)) | (((decltype(reg))(val) * ((mask) & ~((mask) << 1))) & (decltype(reg))(mask))) + +#define require_privilege(p) require(STATE.prv >= (p)) +#define require_novirt() if (unlikely(STATE.v)) throw trap_virtual_instruction(insn.bits()) +#define require_rv64 require(xlen == 64) +#define require_rv32 require(xlen == 32) +#define require_extension(s) require(p->extension_enabled(s)) +#define require_either_extension(A,B) require(p->extension_enabled(A) || p->extension_enabled(B)); +#define 
require_impl(s) require(p->supports_impl(s)) +#define require_fp require(STATE.sstatus->enabled(SSTATUS_FS)) +#define require_accelerator require(STATE.sstatus->enabled(SSTATUS_XS)) +#define require_vector_vs require(STATE.sstatus->enabled(SSTATUS_VS)) +#define require_vector(alu) \ + do { \ + require_vector_vs; \ + require_extension('V'); \ + require(!P.VU.vill); \ + if (alu && !P.VU.vstart_alu) \ + require(P.VU.vstart->read() == 0); \ + WRITE_VSTATUS; \ + dirty_vs_state; \ + } while (0); +#define require_vector_novtype(is_log, alu) \ + do { \ + require_vector_vs; \ + require_extension('V'); \ + if (alu && !P.VU.vstart_alu) \ + require(P.VU.vstart->read() == 0); \ + if (is_log) \ + WRITE_VSTATUS; \ + dirty_vs_state; \ + } while (0); +#define require_align(val, pos) require(is_aligned(val, pos)) +#define require_noover(astart, asize, bstart, bsize) \ + require(!is_overlapped(astart, asize, bstart, bsize)) +#define require_noover_widen(astart, asize, bstart, bsize) \ + require(!is_overlapped_widen(astart, asize, bstart, bsize)) +#define require_vm do { if (insn.v_vm() == 0) require(insn.rd() != 0); } while (0); +#define require_envcfg(field) \ + do { \ + if (((STATE.prv != PRV_M) && (m##field == 0)) || \ + ((STATE.prv == PRV_U && !STATE.v) && (s##field == 0))) \ + throw trap_illegal_instruction(insn.bits()); \ + else if (STATE.v && ((h##field == 0) || \ + ((STATE.prv == PRV_U) && (s##field == 0)))) \ + throw trap_virtual_instruction(insn.bits()); \ + } while (0); + +#define set_fp_exceptions ({ if (softfloat_exceptionFlags) { \ + STATE.fflags->write(STATE.fflags->read() | softfloat_exceptionFlags); \ + } \ + softfloat_exceptionFlags = 0; }) + +#define sext32(x) ((sreg_t)(int32_t)(x)) +#define zext32(x) ((reg_t)(uint32_t)(x)) +#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen)) +#define zext(x, pos) (((reg_t)(x) << (64 - (pos))) >> (64 - (pos))) +#define zext_xlen(x) zext(x, xlen) + +#define set_pc(x) \ + do { p->check_pc_alignment(x); \ + npc = sext_xlen(x); \ + } while (0) + +#define set_pc_and_serialize(x) \ + do { reg_t __npc = (x) & p->pc_alignment_mask(); \ + npc = PC_SERIALIZE_AFTER; \ + STATE.pc = __npc; \ + } while (0) + +class wait_for_interrupt_t {}; + +#define wfi() \ + do { set_pc_and_serialize(npc); \ + npc = PC_SERIALIZE_WFI; \ + throw wait_for_interrupt_t(); \ + } while (0) + +#define serialize() set_pc_and_serialize(npc) + +/* Sentinel PC values to serialize simulator pipeline */ +#define PC_SERIALIZE_BEFORE 3 +#define PC_SERIALIZE_AFTER 5 +#define PC_SERIALIZE_WFI 7 +#define invalid_pc(pc) ((pc) & 1) + +/* Convenience wrappers to simplify softfloat code sequences */ +#define isBoxedF16(r) (isBoxedF32(r) && ((uint64_t)((r.v[0] >> 16) + 1) == ((uint64_t)1 << 48))) +#define unboxF16(r) (isBoxedF16(r) ? (uint16_t)r.v[0] : defaultNaNF16UI) +#define isBoxedF32(r) (isBoxedF64(r) && ((uint32_t)((r.v[0] >> 32) + 1) == 0)) +#define unboxF32(r) (isBoxedF32(r) ? (uint32_t)r.v[0] : defaultNaNF32UI) +#define isBoxedF64(r) ((r.v[1] + 1) == 0) +#define unboxF64(r) (isBoxedF64(r) ? 
r.v[0] : defaultNaNF64UI) +typedef float128_t freg_t; +inline float16_t f16(uint16_t v) { return { v }; } +inline float32_t f32(uint32_t v) { return { v }; } +inline float64_t f64(uint64_t v) { return { v }; } +inline float16_t f16(freg_t r) { return f16(unboxF16(r)); } +inline float32_t f32(freg_t r) { return f32(unboxF32(r)); } +inline float64_t f64(freg_t r) { return f64(unboxF64(r)); } +inline float128_t f128(freg_t r) { return r; } +inline freg_t freg(float16_t f) { return { ((uint64_t)-1 << 16) | f.v, (uint64_t)-1 }; } +inline freg_t freg(float32_t f) { return { ((uint64_t)-1 << 32) | f.v, (uint64_t)-1 }; } +inline freg_t freg(float64_t f) { return { f.v, (uint64_t)-1 }; } +inline freg_t freg(float128_t f) { return f; } +#define F16_SIGN ((uint16_t)1 << 15) +#define F32_SIGN ((uint32_t)1 << 31) +#define F64_SIGN ((uint64_t)1 << 63) +#define fsgnj16(a, b, n, x) \ + f16((f16(a).v & ~F16_SIGN) | ((((x) ? f16(a).v : (n) ? F16_SIGN : 0) ^ f16(b).v) & F16_SIGN)) +#define fsgnj32(a, b, n, x) \ + f32((f32(a).v & ~F32_SIGN) | ((((x) ? f32(a).v : (n) ? F32_SIGN : 0) ^ f32(b).v) & F32_SIGN)) +#define fsgnj64(a, b, n, x) \ + f64((f64(a).v & ~F64_SIGN) | ((((x) ? f64(a).v : (n) ? F64_SIGN : 0) ^ f64(b).v) & F64_SIGN)) + +#define isNaNF128(x) isNaNF128UI(x.v[1], x.v[0]) +inline float128_t defaultNaNF128() +{ + float128_t nan; + nan.v[1] = defaultNaNF128UI64; + nan.v[0] = defaultNaNF128UI0; + return nan; +} +inline freg_t fsgnj128(freg_t a, freg_t b, bool n, bool x) +{ + a.v[1] = (a.v[1] & ~F64_SIGN) | (((x ? a.v[1] : n ? F64_SIGN : 0) ^ b.v[1]) & F64_SIGN); + return a; +} +inline freg_t f128_negate(freg_t a) +{ + a.v[1] ^= F64_SIGN; + return a; +} + +#define validate_csr(which, write) ({ \ + if (!STATE.serialized) return PC_SERIALIZE_BEFORE; \ + STATE.serialized = false; \ + /* permissions check occurs in get_csr */ \ + (which); }) + +/* For debug only. 
This will fail if the native machine's float types are not IEEE */ +inline float to_f(float32_t f) { float r; memcpy(&r, &f, sizeof(r)); return r; } +inline double to_f(float64_t f) { double r; memcpy(&r, &f, sizeof(r)); return r; } +inline long double to_f(float128_t f) { long double r; memcpy(&r, &f, sizeof(r)); return r; } + +// Vector macros +#define e8 8 // 8b elements +#define e16 16 // 16b elements +#define e32 32 // 32b elements +#define e64 64 // 64b elements +#define e128 128 // 128b elements +#define e256 256 // 256b elements +#define e512 512 // 512b elements +#define e1024 1024 // 1024b elements + +#define vsext(x, sew) (((sreg_t)(x) << (64 - sew)) >> (64 - sew)) +#define vzext(x, sew) (((reg_t)(x) << (64 - sew)) >> (64 - sew)) + +#define DEBUG_RVV 0 + +#if DEBUG_RVV +#define DEBUG_RVV_FP_VV \ + printf("vfp(%lu) vd=%f vs1=%f vs2=%f\n", i, to_f(vd), to_f(vs1), to_f(vs2)); +#define DEBUG_RVV_FP_VF \ + printf("vfp(%lu) vd=%f vs1=%f vs2=%f\n", i, to_f(vd), to_f(rs1), to_f(vs2)); +#define DEBUG_RVV_FMA_VV \ + printf("vfma(%lu) vd=%f vs1=%f vs2=%f vd_old=%f\n", i, to_f(vd), to_f(vs1), to_f(vs2), to_f(vd_old)); +#define DEBUG_RVV_FMA_VF \ + printf("vfma(%lu) vd=%f vs1=%f vs2=%f vd_old=%f\n", i, to_f(vd), to_f(rs1), to_f(vs2), to_f(vd_old)); +#else +#define DEBUG_RVV_FP_VV 0 +#define DEBUG_RVV_FP_VF 0 +#define DEBUG_RVV_FMA_VV 0 +#define DEBUG_RVV_FMA_VF 0 +#endif + +// +// vector: masking skip helper +// +#define VI_MASK_VARS \ + const int midx = i / 64; \ + const int mpos = i % 64; + +#define VI_LOOP_ELEMENT_SKIP(BODY) \ + VI_MASK_VARS \ + if (insn.v_vm() == 0) { \ + BODY; \ + bool skip = ((P.VU.elt(0, midx) >> mpos) & 0x1) == 0; \ + if (skip) { \ + continue; \ + } \ + } + +#define VI_ELEMENT_SKIP(inx) \ + if (inx >= vl) { \ + continue; \ + } else if (inx < P.VU.vstart->read()) { \ + continue; \ + } else { \ + VI_LOOP_ELEMENT_SKIP(); \ + } + +// +// vector: operation and register acccess check helper +// +static inline bool is_overlapped(const int astart, int asize, + const int bstart, int bsize) +{ + asize = asize == 0 ? 1 : asize; + bsize = bsize == 0 ? 1 : bsize; + + const int aend = astart + asize; + const int bend = bstart + bsize; + + return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; +} + +static inline bool is_overlapped_widen(const int astart, int asize, + const int bstart, int bsize) +{ + asize = asize == 0 ? 1 : asize; + bsize = bsize == 0 ? 1 : bsize; + + const int aend = astart + asize; + const int bend = bstart + bsize; + + if (astart < bstart && + is_overlapped(astart, asize, bstart, bsize) && + !is_overlapped(astart, asize, bstart + bsize, bsize)) { + return false; + } else { + return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; + } +} + +static inline bool is_aligned(const unsigned val, const unsigned pos) +{ + return pos ? (val & (pos - 1)) == 0 : true; +} + +#define VI_NARROW_CHECK_COMMON \ + require_vector(true); \ + require(P.VU.vflmul <= 4); \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + require_align(insn.rs2(), P.VU.vflmul * 2); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_vm; \ + +#define VI_WIDE_CHECK_COMMON \ + require_vector(true); \ + require(P.VU.vflmul <= 4); \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + require_align(insn.rd(), P.VU.vflmul * 2); \ + require_vm; \ + +#define VI_CHECK_ST_INDEX(elt_width) \ + require_vector(false); \ + float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + reg_t emul = vemul < 1 ? 
1 : vemul; \ + reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), vemul); \ + require((nf * flmul) <= (NVPR / 4) && \ + (insn.rd() + nf * flmul) <= NVPR); \ + +#define VI_CHECK_LD_INDEX(elt_width) \ + VI_CHECK_ST_INDEX(elt_width); \ + for (reg_t idx = 0; idx < nf; ++idx) { \ + reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ + reg_t seg_vd = insn.rd() + flmul * idx; \ + if (elt_width > P.VU.vsew) { \ + if (seg_vd != insn.rs2()) \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } else if (elt_width < P.VU.vsew) { \ + if (vemul < 1) { \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } else { \ + require_noover_widen(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + if (nf >= 2) { \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + require_vm; \ + +#define VI_CHECK_MSS(is_vs1) \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), 1, insn.rs2(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (is_vs1) { \ + if (insn.rd() != insn.rs1()) \ + require_noover(insn.rd(), 1, insn.rs1(), P.VU.vflmul); \ + require_align(insn.rs1(), P.VU.vflmul); \ + } \ + +#define VI_CHECK_SSS(is_vs1) \ + require_vm; \ + if (P.VU.vflmul > 1) { \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (is_vs1) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_STORE(elt_width, is_mask_ldst) \ + require_vector(false); \ + reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8; \ + float vemul = is_mask_ldst ? 1 : ((float)veew / P.VU.vsew * P.VU.vflmul); \ + reg_t emul = vemul < 1 ? 1 : vemul; \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rd(), vemul); \ + require((nf * emul) <= (NVPR / 4) && \ + (insn.rd() + nf * emul) <= NVPR); \ + require(veew <= P.VU.ELEN); \ + +#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \ + VI_CHECK_STORE(elt_width, is_mask_ldst); \ + require_vm; \ + +#define VI_CHECK_DSS(is_vs1) \ + VI_WIDE_CHECK_COMMON; \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ + } \ + if (is_vs1) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_DDS(is_rs) \ + VI_WIDE_CHECK_COMMON; \ + require_align(insn.rs2(), P.VU.vflmul * 2); \ + if (is_rs) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_SDS(is_vs1) \ + VI_NARROW_CHECK_COMMON; \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul * 2); \ + if (is_vs1) \ + require_align(insn.rs1(), P.VU.vflmul); \ + +#define VI_CHECK_REDUCTION(is_wide) \ + require_vector(true); \ + if (is_wide) { \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + } \ + require_align(insn.rs2(), P.VU.vflmul); \ + require(P.VU.vstart->read() == 0); \ + +#define VI_CHECK_SLIDE(is_over) \ + require_align(insn.rs2(), P.VU.vflmul); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_vm; \ + if (is_over) \ 
+ require(insn.rd() != insn.rs2()); \ + + +// +// vector: loop header and end helper +// +#define VI_GENERAL_LOOP_BASE \ + require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + reg_t sew = P.VU.vsew; \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { + +#define VI_LOOP_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_LOOP_END \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_REDUCTION_END(x) \ + } \ + if (vl > 0) { \ + vd_0_des = vd_0_res; \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_CARRY_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_MASK_VARS \ + auto v0 = P.VU.elt(0, midx); \ + const uint64_t mmask = UINT64_C(1) << mpos; \ + const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ + uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0; \ + uint128_t res = 0; \ + auto &vd = P.VU.elt(rd_num, midx, true); + +#define VI_LOOP_CARRY_END \ + vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ + } \ + P.VU.vstart->write(0); +#define VI_LOOP_WITH_CARRY_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_MASK_VARS \ + auto &v0 = P.VU.elt(0, midx); \ + const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ + uint64_t carry = (v0 >> mpos) & 0x1; + +#define VI_LOOP_CMP_BASE \ + require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + reg_t sew = P.VU.vsew; \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t &vdi = P.VU.elt(insn.rd(), midx, true); \ + uint64_t res = 0; + +#define VI_LOOP_CMP_END \ + vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_MASK(op) \ + require(P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + int midx = i / 64; \ + int mpos = i % 64; \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t vs2 = P.VU.elt(insn.rs2(), midx); \ + uint64_t vs1 = P.VU.elt(insn.rs1(), midx); \ + uint64_t &res = P.VU.elt(insn.rd(), midx, true); \ + res = (res & ~mmask) | ((op) & (1ULL << mpos)); \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_NSHIFT_BASE \ + VI_GENERAL_LOOP_BASE; \ + VI_LOOP_ELEMENT_SKIP({ \ + require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \ + }); + + +#define INT_ROUNDING(result, xrm, gb) \ + do { \ + const uint64_t lsb = 1UL << (gb); \ + const uint64_t lsb_half = lsb >> 1; \ + switch (xrm) { \ + case VRM::RNU: \ + result += lsb_half; \ + break; \ + case VRM::RNE: \ + if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb))) \ + result += lsb; \ + break; \ + case VRM::RDN: \ + break; \ + case VRM::ROD: \ + if (result & (lsb - 1)) \ + result |= lsb; \ + break; \ + case VRM::INVALID_RM: \ + assert(true); \ + } \ + } while (0) + +// +// vector: integer and masking operand access helper +// +#define VXI_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); + +#define VV_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + 
type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type zimm5 = (type_usew_t::type)insn.v_zimm5(); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define XV_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, RS1); + +#define VV_SU_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_SU_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_UCMP_PARAMS(x) \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_UCMP_PARAMS(x) \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_UCMP_PARAMS(x) \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_CMP_PARAMS(x) \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_CMP_PARAMS(x) \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_CMP_PARAMS(x) \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_XI_SLIDEDOWN_PARAMS(x, off) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2 = P.VU.elt::type>(rs2_num, i + off); + +#define VI_XI_SLIDEUP_PARAMS(x, offset) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2 = P.VU.elt::type>(rs2_num, i - offset); + +#define VI_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto zimm5 = (type_usew_t::type)insn.v_zimm5(); + +#define VX_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; + +#define VV_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); + +#define XI_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; \ + auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + +#define VV_CARRY_PARAMS(x) \ + auto vs2 = 
P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); \ + +#define XI_WITH_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; \ + auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + auto &vd = P.VU.elt::type>(rd_num, i, true); + +#define VV_WITH_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); \ + auto &vd = P.VU.elt::type>(rd_num, i, true); + +#define VFP_V_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define VFP_VV_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t vs1 = P.VU.elt(rs1_num, i); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define VFP_VF_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +// +// vector: integer and masking operation loop +// + +#define INSNS_BASE(PARAMS, BODY) \ + if (sew == e8) { \ + PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + PARAMS(e64); \ + BODY; \ + } + +// comparision result to masking register +#define VI_LOOP_CMP_BODY(PARAMS, BODY) \ + VI_LOOP_CMP_BASE \ + INSNS_BASE(PARAMS, BODY) \ + VI_LOOP_CMP_END + +#define VI_VV_LOOP_CMP(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CMP_BODY(VV_CMP_PARAMS, BODY) + +#define VI_VX_LOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VX_CMP_PARAMS, BODY) + +#define VI_VI_LOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VI_CMP_PARAMS, BODY) + +#define VI_VV_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CMP_BODY(VV_UCMP_PARAMS, BODY) + +#define VI_VX_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VX_UCMP_PARAMS, BODY) + +#define VI_VI_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VI_UCMP_PARAMS, BODY) + +// merge and copy loop +#define VI_MERGE_VARS \ + VI_MASK_VARS \ + bool use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; + +#define VI_MERGE_LOOP_BASE \ + require_vector(true); \ + VI_GENERAL_LOOP_BASE \ + VI_MERGE_VARS + +#define VI_VV_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(true); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VI_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ 
+ VI_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VF_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_MERGE_VARS \ + if (P.VU.vsew == e16) { \ + VFP_VF_PARAMS(16); \ + BODY; \ + } else if (P.VU.vsew == e32) { \ + VFP_VF_PARAMS(32); \ + BODY; \ + } else if (P.VU.vsew == e64) { \ + VFP_VF_PARAMS(64); \ + BODY; \ + } \ + VI_LOOP_END + +// reduction loop - signed +#define VI_LOOP_REDUCTION_BASE(x) \ + require(x >= e8 && x <= e64); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + +#define REDUCTION_LOOP(x, BODY) \ + VI_LOOP_REDUCTION_BASE(x) \ + BODY; \ + VI_LOOP_REDUCTION_END(x) + +#define VI_VV_LOOP_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(false); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + REDUCTION_LOOP(e8, BODY) \ + } else if (sew == e16) { \ + REDUCTION_LOOP(e16, BODY) \ + } else if (sew == e32) { \ + REDUCTION_LOOP(e32, BODY) \ + } else if (sew == e64) { \ + REDUCTION_LOOP(e64, BODY) \ + } + +// reduction loop - unsigned +#define VI_ULOOP_REDUCTION_BASE(x) \ + require(x >= e8 && x <= e64); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define REDUCTION_ULOOP(x, BODY) \ + VI_ULOOP_REDUCTION_BASE(x) \ + BODY; \ + VI_LOOP_REDUCTION_END(x) + +#define VI_VV_ULOOP_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(false); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + REDUCTION_ULOOP(e8, BODY) \ + } else if (sew == e16) { \ + REDUCTION_ULOOP(e16, BODY) \ + } else if (sew == e32) { \ + REDUCTION_ULOOP(e32, BODY) \ + } else if (sew == e64) { \ + REDUCTION_ULOOP(e64, BODY) \ + } + + +// genearl VXI signed/unsigned loop +#define VI_VV_ULOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VV_LOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_ULOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + 
VX_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_ULOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +// signed unsigned operation loop (e.g. mulhsu) +#define VI_VV_SU_LOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_SU_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_SU_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_SU_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_SU_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_SU_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_SU_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_SU_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_SU_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_SU_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +// narrow operation loop +#define VI_VV_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(true); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VV_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VV_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VX_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VX_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VI_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VI_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VI_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VI_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VI_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VX_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VX_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VX_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VV_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(true); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VV_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VV_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VV_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +// widen operation loop +#define VI_VV_LOOP_WIDEN(BODY) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_WIDEN(BODY) \ 
+ VI_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign##16_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \ + } \ + break; \ + case e16: { \ + sign##32_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \ + } \ + break; \ + default: { \ + sign##64_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \ + } \ + break; \ + } + +#define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign_d##16_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \ + } \ + break; \ + case e16: { \ + sign_d##32_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \ + } \ + break; \ + default: { \ + sign_d##64_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \ + } \ + break; \ + } + +#define VI_WIDE_WVX_OP(var0, op0, sign) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign##16_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##16_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##16_t)(sign##8_t)var0; \ + } \ + break; \ + case e16: { \ + sign##32_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##32_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##32_t)(sign##16_t)var0; \ + } \ + break; \ + default: { \ + sign##64_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##64_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##64_t)(sign##32_t)var0; \ + } \ + break; \ + } + +// wide reduction loop - signed +#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define WIDE_REDUCTION_LOOP(sew1, sew2, BODY) \ + VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + BODY; \ + VI_LOOP_REDUCTION_END(sew2) + +#define VI_VV_LOOP_WIDE_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(true); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + WIDE_REDUCTION_LOOP(e8, e16, BODY) \ + } else if (sew == e16) { \ + WIDE_REDUCTION_LOOP(e16, e32, BODY) \ + } else if (sew == e32) { \ + WIDE_REDUCTION_LOOP(e32, e64, BODY) \ + } + +// wide reduction loop - unsigned +#define VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define WIDE_REDUCTION_ULOOP(sew1, sew2, BODY) \ + 
VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + BODY; \ + VI_LOOP_REDUCTION_END(sew2) + +#define VI_VV_ULOOP_WIDE_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(true); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + WIDE_REDUCTION_ULOOP(e8, e16, BODY) \ + } else if (sew == e16) { \ + WIDE_REDUCTION_ULOOP(e16, e32, BODY) \ + } else if (sew == e32) { \ + WIDE_REDUCTION_ULOOP(e32, e64, BODY) \ + } + +// carry/borrow bit loop +#define VI_VV_LOOP_CARRY(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CARRY_BASE \ + if (sew == e8) { \ + VV_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + VV_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + VV_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + VV_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_CARRY_END + +#define VI_XI_LOOP_CARRY(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CARRY_BASE \ + if (sew == e8) { \ + XI_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + XI_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + XI_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + XI_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_CARRY_END + +#define VI_VV_LOOP_WITH_CARRY(BODY) \ + require_vm; \ + VI_CHECK_SSS(true); \ + VI_LOOP_WITH_CARRY_BASE \ + if (sew == e8) { \ + VV_WITH_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + VV_WITH_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + VV_WITH_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + VV_WITH_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_XI_LOOP_WITH_CARRY(BODY) \ + require_vm; \ + VI_CHECK_SSS(false); \ + VI_LOOP_WITH_CARRY_BASE \ + if (sew == e8) { \ + XI_WITH_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + XI_WITH_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + XI_WITH_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + XI_WITH_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_END + +// average loop +#define VI_VV_LOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VV_LOOP({ \ + uint128_t res = ((uint128_t)vs2) op vs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VX_LOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VX_LOOP({ \ + uint128_t res = ((uint128_t)vs2) op rs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VV_ULOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VV_ULOOP({ \ + uint128_t res = ((uint128_t)vs2) op vs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VX_ULOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VX_ULOOP({ \ + uint128_t res = ((uint128_t)vs2) op rs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +// +// vector: load/store helper +// +#define VI_STRIP(inx) \ + reg_t vreg_inx = inx; + +#define VI_DUPLICATE_VREG(reg_num, idx_sew) \ +reg_t index[P.VU.vlmax]; \ + for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl->read() != 0; ++i) { \ + switch (idx_sew) { \ + case e8: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e16: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e32: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e64: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + } \ +} + +#define VI_LD(stride, offset, elt_width, is_mask_ldst) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = is_mask_ldst ? 
((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + VI_CHECK_LOAD(elt_width, is_mask_ldst); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + elt_width##_t val = MMU.load_##elt_width( \ + baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \ + P.VU.elt(vd + fn * emul, vreg_inx, true) = val; \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_LD_INDEX(elt_width, is_seg) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + if (!is_seg) \ + require(nf == 1); \ + VI_CHECK_LD_INDEX(elt_width); \ + VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + switch (P.VU.vsew) { \ + case e8: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint8(baseAddr + index[i] + fn * 1); \ + break; \ + case e16: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint16(baseAddr + index[i] + fn * 2); \ + break; \ + case e32: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint32(baseAddr + index[i] + fn * 4); \ + break; \ + default: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint64(baseAddr + index[i] + fn * 8); \ + break; \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST(stride, offset, elt_width, is_mask_ldst) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + VI_CHECK_STORE(elt_width, is_mask_ldst); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_STRIP(i) \ + VI_ELEMENT_SKIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + elt_width##_t val = P.VU.elt(vs3 + fn * emul, vreg_inx); \ + MMU.store_##elt_width( \ + baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST_INDEX(elt_width, is_seg) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + if (!is_seg) \ + require(nf == 1); \ + VI_CHECK_ST_INDEX(elt_width); \ + VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_STRIP(i) \ + VI_ELEMENT_SKIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + switch (P.VU.vsew) { \ + case e8: \ + MMU.store_uint8(baseAddr + index[i] + fn * 1, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + case e16: \ + MMU.store_uint16(baseAddr + index[i] + fn * 2, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + case e32: \ + MMU.store_uint32(baseAddr + index[i] + fn * 4, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + default: \ + MMU.store_uint64(baseAddr + index[i] + fn * 8, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_LDST_FF(elt_width) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t sew = p->VU.vsew; \ + const reg_t vl = p->VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t rd_num = insn.rd(); \ + VI_CHECK_LOAD(elt_width, false); \ + bool early_stop = false; \ + for (reg_t i = p->VU.vstart->read(); i < vl; ++i) { \ + VI_STRIP(i); \ + VI_ELEMENT_SKIP(i); \ + \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + uint64_t 
val; \ + try { \ + val = MMU.load_##elt_width( \ + baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \ + } catch (trap_t& t) { \ + if (i == 0) \ + throw; /* Only take exception on zeroth element */ \ + /* Reduce VL if an exception occurs on a later element */ \ + early_stop = true; \ + P.VU.vl->write_raw(i); \ + break; \ + } \ + p->VU.elt(rd_num + fn * emul, vreg_inx, true) = val; \ + } \ + \ + if (early_stop) { \ + break; \ + } \ + } \ + p->VU.vstart->write(0); + +#define VI_LD_WHOLE(elt_width) \ + require_vector_novtype(true, false); \ + require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + const reg_t len = insn.v_nf() + 1; \ + require_align(vd, len); \ + const reg_t elt_per_reg = P.VU.vlenb / sizeof(elt_width ## _t); \ + const reg_t size = len * elt_per_reg; \ + if (P.VU.vstart->read() < size) { \ + reg_t i = P.VU.vstart->read() / elt_per_reg; \ + reg_t off = P.VU.vstart->read() % elt_per_reg; \ + if (off) { \ + for (reg_t pos = off; pos < elt_per_reg; ++pos) { \ + auto val = MMU.load_## elt_width(baseAddr + \ + P.VU.vstart->read() * sizeof(elt_width ## _t)); \ + P.VU.elt(vd + i, pos, true) = val; \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + ++i; \ + } \ + for (; i < len; ++i) { \ + for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \ + auto val = MMU.load_## elt_width(baseAddr + \ + P.VU.vstart->read() * sizeof(elt_width ## _t)); \ + P.VU.elt(vd + i, pos, true) = val; \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST_WHOLE \ + require_vector_novtype(true, false); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + const reg_t len = insn.v_nf() + 1; \ + require_align(vs3, len); \ + const reg_t size = len * P.VU.vlenb; \ + \ + if (P.VU.vstart->read() < size) { \ + reg_t i = P.VU.vstart->read() / P.VU.vlenb; \ + reg_t off = P.VU.vstart->read() % P.VU.vlenb; \ + if (off) { \ + for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \ + auto val = P.VU.elt(vs3 + i, pos); \ + MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + i++; \ + } \ + for (; i < len; ++i) { \ + for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \ + auto val = P.VU.elt(vs3 + i, pos); \ + MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +// +// vector: amo +// +#define VI_AMO(op, type, idx_type) \ + require_vector(false); \ + require_align(insn.rd(), P.VU.vflmul); \ + require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \ + require_align(insn.rd(), P.VU.vflmul); \ + float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rs2(), vemul); \ + if (insn.v_wd()) { \ + require_vm; \ + if (idx_type > P.VU.vsew) { \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else if (idx_type < P.VU.vsew) { \ + if (vemul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + } \ + VI_DUPLICATE_VREG(insn.rs2(), idx_type); \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + switch (P.VU.vsew) { \ + case e32: { \ + auto vs3 = P.VU.elt< 
type ## 32_t>(vd, vreg_inx); \ + auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t lhs) { op }); \ + if (insn.v_wd()) \ + P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \ + } \ + break; \ + case e64: { \ + auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \ + auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t lhs) { op }); \ + if (insn.v_wd()) \ + P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \ + } \ + break; \ + default: \ + require(0); \ + break; \ + } \ + } \ + P.VU.vstart->write(0); + +// vector: sign/unsiged extension +#define VI_VV_EXT(div, type) \ + require(insn.rd() != insn.rs2()); \ + require_vm; \ + reg_t from = P.VU.vsew / div; \ + require(from >= e8 && from <= e64); \ + require(((float)P.VU.vflmul / div) >= 0.125 && ((float)P.VU.vflmul / div) <= 8 ); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul / div); \ + if ((P.VU.vflmul / div) < 1) { \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ + } \ + reg_t pat = (((P.VU.vsew >> 3) << 4) | from >> 3); \ + VI_GENERAL_LOOP_BASE \ + VI_LOOP_ELEMENT_SKIP(); \ + switch (pat) { \ + case 0x21: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x41: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x81: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x42: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x82: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x84: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x88: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + default: \ + break; \ + } \ + VI_LOOP_END + +// +// vector: vfp helper +// +#define VI_VFP_COMMON \ + require_fp; \ + require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || \ + (P.VU.vsew == e32 && p->extension_enabled('F')) || \ + (P.VU.vsew == e64 && p->extension_enabled('D'))); \ + require_vector(true); \ + require(STATE.frm->read() < 0x5); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + softfloat_roundingMode = STATE.frm->read(); + +#define VI_VFP_LOOP_BASE \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_LOOP_CMP_BASE \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t &vd = P.VU.elt(rd_num, midx, true); \ + uint64_t res = 0; + +#define VI_VFP_LOOP_REDUCTION_BASE(width) \ + float##width##_t vd_0 = P.VU.elt(rd_num, 0); \ + float##width##_t vs1_0 = P.VU.elt(rs1_num, 0); \ + vd_0 = vs1_0; \ + bool is_active = false; \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); \ + is_active = true; \ + +#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \ + VI_VFP_COMMON \ + float64_t vd_0 = f64(P.VU.elt(rs1_num, 0).v); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_LOOP_END \ + } \ + P.VU.vstart->write(0); \ + +#define VI_VFP_LOOP_REDUCTION_END(x) \ + } \ + P.VU.vstart->write(0); \ + if (vl > 0) { \ + if (is_propagate && !is_active) { \ + switch (x) { \ + case e16: { \ + auto ret = f16_classify(f16(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 
0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt<uint16_t>(rd_num, 0, true) = defaultNaNF16UI; \ + } else { \ + P.VU.elt<uint16_t>(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + case e32: { \ + auto ret = f32_classify(f32(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt<uint32_t>(rd_num, 0, true) = defaultNaNF32UI; \ + } else { \ + P.VU.elt<uint32_t>(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + case e64: { \ + auto ret = f64_classify(f64(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt<uint64_t>(rd_num, 0, true) = defaultNaNF64UI; \ + } else { \ + P.VU.elt<uint64_t>(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + } \ + } else { \ + P.VU.elt<type_sew_t<x>::type>(rd_num, 0, true) = vd_0.v; \ + } \ + } + +#define VI_VFP_LOOP_CMP_END \ + switch (P.VU.vsew) { \ + case e16: \ + case e32: \ + case e64: { \ + vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + } \ + P.VU.vstart->write(0); + +#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VV_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VV_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VV_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_V_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_V_PARAMS(16); \ + BODY16; \ + break; \ + } \ + case e32: { \ + VFP_V_PARAMS(32); \ + BODY32; \ + break; \ + } \ + case e64: { \ + VFP_V_PARAMS(64); \ + BODY64; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + set_fp_exceptions; \ + VI_VFP_LOOP_END + +#define VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \ + VI_CHECK_REDUCTION(false) \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: { \ + VI_VFP_LOOP_REDUCTION_BASE(16) \ + BODY16; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e16) \ + break; \ + } \ + case e32: { \ + VI_VFP_LOOP_REDUCTION_BASE(32) \ + BODY32; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e32) \ + break; \ + } \ + case e64: { \ + VI_VFP_LOOP_REDUCTION_BASE(64) \ + BODY64; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e64) \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + +#define VI_VFP_VV_LOOP_WIDE_REDUCTION(BODY16, BODY32) \ + VI_CHECK_REDUCTION(true) \ + VI_VFP_COMMON \ + require((P.VU.vsew == e16 && p->extension_enabled('F')) || \ + (P.VU.vsew == e32 && p->extension_enabled('D'))); \ + bool is_active = false; \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t vd_0 = P.VU.elt<float32_t>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + is_active = true; \ + float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e32) \ + break; \ + } \ + case e32: { \ + float64_t vd_0 = P.VU.elt<float64_t>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + is_active = true; \ + float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e64) \ + break; \ + } \ + 
default: \ + require(0); \ + break; \ + }; \ + +#define VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VF_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VF_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VF_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VF; \ + VI_VFP_LOOP_END + +#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \ + VI_CHECK_MSS(true); \ + VI_VFP_LOOP_CMP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VV_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VV_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VV_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + VI_VFP_LOOP_CMP_END \ + +#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \ + VI_CHECK_MSS(false); \ + VI_VFP_LOOP_CMP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VF_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VF_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VF_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + VI_VFP_LOOP_CMP_END \ + +#define VI_VFP_VF_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \ + float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \ + float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \ + float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \ + float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + + +#define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DSS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \ + float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \ + float32_t vs1 = f16_to_f32(P.VU.elt<float16_t>(rs1_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \ + float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \ + float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_WF_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DDS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \ + float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \ + float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \ + float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \ + float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_WV_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DDS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + 
float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \ + float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \ + float32_t vs1 = f16_to_f32(P.VU.elt<float16_t>(rs1_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \ + float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \ + float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_LOOP_SCALE_BASE \ + require_fp; \ + require_vector(true); \ + require(STATE.frm->read() < 0x5); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + softfloat_roundingMode = STATE.frm->read(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \ + CHECK \ + VI_VFP_LOOP_SCALE_BASE \ + CVT_PARAMS \ + BODY \ + set_fp_exceptions; \ + VI_VFP_LOOP_END + +#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), \ + { p->extension_enabled(EXT_ZFH); }, \ + BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), \ + { p->extension_enabled('F'); }, \ + BODY32); } \ + break; \ + case e64: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), \ + { p->extension_enabled('D'); }, \ + BODY64); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), \ + { p->extension_enabled(EXT_ZFH); }, \ + BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), \ + { p->extension_enabled('F'); }, \ + BODY32); } \ + break; \ + case e64: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), \ + { p->extension_enabled('D'); }, \ + BODY64); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e8: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \ + break; \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32) \ + VI_CHECK_SDS(false); \ + 
switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_SDS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_SDS(false); \ + switch (P.VU.vsew) { \ + case e8: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \ + break; \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +// The p-extension support is contributed by +// Programming Language Lab, Department of Computer Science, National Tsing-Hua University, Taiwan + +#define P_FIELD(R, INDEX, SIZE) \ + (type_sew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE)) + +#define P_UFIELD(R, INDEX, SIZE) \ + (type_usew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE)) + +#define P_B(R, INDEX) P_UFIELD(R, INDEX, 8) +#define P_H(R, INDEX) P_UFIELD(R, INDEX, 16) +#define P_W(R, INDEX) P_UFIELD(R, INDEX, 32) +#define P_SB(R, INDEX) P_FIELD(R, INDEX, 8) +#define P_SH(R, INDEX) P_FIELD(R, INDEX, 16) +#define P_SW(R, INDEX) P_FIELD(R, INDEX, 32) + +#define READ_REG_PAIR(reg) ({ \ + require((reg) % 2 == 0); \ + (reg) == 0 ?
reg_t(0) : \ + (READ_REG((reg) + 1) << 32) + zext32(READ_REG(reg)); }) + +#define RS1_PAIR READ_REG_PAIR(insn.rs1()) +#define RS2_PAIR READ_REG_PAIR(insn.rs2()) +#define RD_PAIR READ_REG_PAIR(insn.rd()) + +#define WRITE_PD() \ + rd_tmp = set_field(rd_tmp, make_mask64((i * sizeof(pd) * 8), sizeof(pd) * 8), pd); + +#define WRITE_RD_PAIR(value) \ + if (insn.rd() != 0) { \ + require(insn.rd() % 2 == 0); \ + WRITE_REG(insn.rd(), sext32(value)); \ + WRITE_REG(insn.rd() + 1, (sreg_t(value)) >> 32); \ + } + +#define P_SET_OV(ov) \ + if (ov) P.VU.vxsat->write(1); + +#define P_SAT(R, BIT) \ + if (R > INT##BIT##_MAX) { \ + R = INT##BIT##_MAX; \ + P_SET_OV(1); \ + } else if (R < INT##BIT##_MIN) { \ + R = INT##BIT##_MIN; \ + P_SET_OV(1); \ + } + +#define P_SATU(R, BIT) \ + if (R > UINT##BIT##_MAX) { \ + R = UINT##BIT##_MAX; \ + P_SET_OV(1); \ + } else if (R < 0) { \ + P_SET_OV(1); \ + R = 0; \ + } + +#define P_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + reg_t rs2 = RS2; \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_ONE_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_I_LOOP_BASE(BIT, IMMBIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + type_usew_t<BIT>::type imm##IMMBIT##u = insn.p_imm##IMMBIT(); \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_X_LOOP_BASE(BIT, LOWBIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + type_usew_t<BIT>::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \ + type_sew_t<BIT>::type ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_MUL_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + reg_t rs2 = RS2; \ + sreg_t len = 32 / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32 || BIT == e64); \ + reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \ + reg_t rs1 = zext_xlen(RS1); \ + reg_t rs2 = zext_xlen(RS2); \ + sreg_t len = 64 / BIT; \ + sreg_t len_inner = BIT / BIT_INNER; \ + for (sreg_t i = len - 1; i >= 0; --i) { \ + sreg_t pd_res = P_FIELD(rd_tmp, i, BIT); \ + for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { + +#define P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32 || BIT == e64); \ + reg_t rd_tmp = USE_RD ?
zext_xlen(RD) : 0; \ + reg_t rs1 = zext_xlen(RS1); \ + reg_t rs2 = zext_xlen(RS2); \ + sreg_t len = 64 / BIT; \ + sreg_t len_inner = BIT / BIT_INNER; \ + for (sreg_t i = len - 1; i >=0; --i) { \ + reg_t pd_res = P_UFIELD(rd_tmp, i, BIT); \ + for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { + +#define P_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, i, BIT); + +#define P_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, i, BIT); + +#define P_CORSS_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + +#define P_CORSS_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); + +#define P_ONE_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); + +#define P_ONE_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); + +#define P_ONE_SUPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); + +#define P_MUL_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, i, BIT); + +#define P_MUL_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, i, BIT); + +#define P_MUL_CROSS_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + +#define P_MUL_CROSS_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT*2); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); + +#define P_REDUCTION_PARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_FIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_UPARAMS(BIT_INNER) \ + auto ps1 = P_UFIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_UFIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_SUPARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_UFIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_FIELD(rs2, (j ^ 1), BIT_INNER); + +#define P_LOOP_BODY(BIT, BODY) { \ + P_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ULOOP_BODY(BIT, BODY) { \ + P_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ONE_LOOP_BODY(BIT, BODY) { \ + P_ONE_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_CROSS_LOOP_BODY(BIT, BODY) { \ + P_CORSS_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_CROSS_ULOOP_BODY(BIT, BODY) { \ + P_CORSS_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ONE_ULOOP_BODY(BIT, BODY) { \ + P_ONE_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_LOOP_BODY(BIT, BODY) { \ + P_MUL_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_ULOOP_BODY(BIT, BODY) { \ + P_MUL_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_CROSS_LOOP_BODY(BIT, BODY) { \ + P_MUL_CROSS_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_CROSS_ULOOP_BODY(BIT, BODY) { \ + P_MUL_CROSS_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_LOOP(BIT, BODY) \ + P_LOOP_BASE(BIT) \ + P_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_ONE_LOOP(BIT, BODY) \ + P_ONE_LOOP_BASE(BIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_ULOOP(BIT, BODY) \ + P_LOOP_BASE(BIT) \ + 
P_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_CROSS_LOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_CROSS_LOOP_BODY(BIT, BODY1) \ + --i; \ + if (sizeof(#BODY2) == 1) { \ + P_CROSS_LOOP_BODY(BIT, BODY1) \ + } \ + else { \ + P_CROSS_LOOP_BODY(BIT, BODY2) \ + } \ + P_LOOP_END() + +#define P_CROSS_ULOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_CROSS_ULOOP_BODY(BIT, BODY1) \ + --i; \ + P_CROSS_ULOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_STRAIGHT_LOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_LOOP_BODY(BIT, BODY1) \ + --i; \ + P_LOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_STRAIGHT_ULOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_ULOOP_BODY(BIT, BODY1) \ + --i; \ + P_ULOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_X_LOOP(BIT, RS2_LOW_BIT, BODY) \ + P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_X_ULOOP(BIT, RS2_LOW_BIT, BODY) \ + P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ + P_ONE_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_I_LOOP(BIT, IMMBIT, BODY) \ + P_I_LOOP_BASE(BIT, IMMBIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_I_ULOOP(BIT, IMMBIT, BODY) \ + P_I_LOOP_BASE(BIT, IMMBIT) \ + P_ONE_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_MUL_LOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_LOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_ULOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_ULOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_CROSS_LOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_CROSS_LOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_CROSS_ULOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_CROSS_ULOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_REDUCTION_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_PARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_ULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_UPARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_ULOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_SULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_SUPARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_CROSS_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_LOOP_END() \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_PAIR_LOOP_END() \ + } \ + if (xlen == 32) { \ + WRITE_RD_PAIR(rd_tmp); \ + } \ + else { \ + WRITE_RD(sext_xlen(rd_tmp)); \ + } + +#define P_REDUCTION_LOOP_END(BIT, IS_SAT) \ + } \ + if (IS_SAT) { \ + P_SAT(pd_res, BIT); \ + } \ + type_usew_t<BIT>::type pd = pd_res; \ + WRITE_PD(); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_REDUCTION_ULOOP_END(BIT, IS_SAT) \ + } \ + if (IS_SAT) { \ + P_SATU(pd_res, BIT); \ + } \ + type_usew_t<BIT>::type pd = pd_res; \ + WRITE_PD(); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_SUNPKD8(X, Y) \ + require_extension(EXT_ZPN); \ + reg_t rd_tmp = 0; \ + int16_t pd[4] = { \ + P_SB(RS1, Y), \ + P_SB(RS1, X), \ + P_SB(RS1, Y + 4), \ + P_SB(RS1, X + 4), \ + }; \ + if (xlen == 64) { \ + memcpy(&rd_tmp, pd, 8); \ + } else { \ + memcpy(&rd_tmp, pd, 4); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_ZUNPKD8(X, Y) \ + require_extension(EXT_ZPN); \ + reg_t rd_tmp = 0; \ + uint16_t pd[4] = { \ + P_B(RS1, Y), \ + 
P_B(RS1, X), \ + P_B(RS1, Y + 4), \ + P_B(RS1, X + 4), \ + }; \ + if (xlen == 64) { \ + memcpy(&rd_tmp, pd, 8); \ + } else { \ + memcpy(&rd_tmp, pd, 4); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_PK(BIT, X, Y) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32); \ + reg_t rd_tmp = 0, rs1 = RS1, rs2 = RS2; \ + for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \ + rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \ + P_UFIELD(RS2, i * 2 + Y, BIT)); \ + rd_tmp = set_field(rd_tmp, make_mask64((i * 2 + 1) * BIT, BIT), \ + P_UFIELD(RS1, i * 2 + X, BIT)); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_64_PROFILE_BASE() \ + require_extension(EXT_ZPSFOPERAND); \ + sreg_t rd, rs1, rs2; + +#define P_64_UPROFILE_BASE() \ + require_extension(EXT_ZPSFOPERAND); \ + reg_t rd, rs1, rs2; + +#define P_64_PROFILE_PARAM(USE_RD, INPUT_PAIR) \ + if (xlen == 32) { \ + rs1 = INPUT_PAIR ? RS1_PAIR : RS1; \ + rs2 = INPUT_PAIR ? RS2_PAIR : RS2; \ + rd = USE_RD ? RD_PAIR : 0; \ + } else { \ + rs1 = RS1; \ + rs2 = RS2; \ + rd = USE_RD ? RD : 0; \ + } + +#define P_64_PROFILE(BODY) \ + P_64_PROFILE_BASE() \ + P_64_PROFILE_PARAM(false, true) \ + BODY \ + P_64_PROFILE_END() \ + +#define P_64_UPROFILE(BODY) \ + P_64_UPROFILE_BASE() \ + P_64_PROFILE_PARAM(false, true) \ + BODY \ + P_64_PROFILE_END() \ + +#define P_64_PROFILE_REDUCTION(BIT, BODY) \ + P_64_PROFILE_BASE() \ + P_64_PROFILE_PARAM(true, false) \ + for (sreg_t i = 0; i < xlen / BIT; i++) { \ + sreg_t ps1 = P_FIELD(rs1, i, BIT); \ + sreg_t ps2 = P_FIELD(rs2, i, BIT); \ + BODY \ + } \ + P_64_PROFILE_END() \ + +#define P_64_UPROFILE_REDUCTION(BIT, BODY) \ + P_64_UPROFILE_BASE() \ + P_64_PROFILE_PARAM(true, false) \ + for (sreg_t i = 0; i < xlen / BIT; i++) { \ + reg_t ps1 = P_UFIELD(rs1, i, BIT); \ + reg_t ps2 = P_UFIELD(rs2, i, BIT); \ + BODY \ + } \ + P_64_PROFILE_END() \ + +#define P_64_PROFILE_END() \ + if (xlen == 32) { \ + WRITE_RD_PAIR(rd); \ + } else { \ + WRITE_RD(sext_xlen(rd)); \ + } + +#define DECLARE_XENVCFG_VARS(field) \ + reg_t m##field = get_field(STATE.menvcfg->read(), MENVCFG_##field); \ + reg_t s##field = get_field(STATE.senvcfg->read(), SENVCFG_##field); \ + reg_t h##field = get_field(STATE.henvcfg->read(), HENVCFG_##field) + +#define DEBUG_START 0x0 +#define DEBUG_END (0x1000 - 1) + +#endif diff --git a/vendor/riscv-isa-sim/riscv/devices.cc b/vendor/riscv-isa-sim/riscv/devices.cc new file mode 100644 index 00000000..eb677a58 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/devices.cc @@ -0,0 +1,139 @@ +#include "devices.h" +#include "mmu.h" +#include <stdexcept> + +void bus_t::add_device(reg_t addr, abstract_device_t* dev) +{ + // Searching devices via lower_bound/upper_bound + // implicitly relies on the underlying std::map + // container to sort the keys and provide ordered + // iteration over this sort, which it does. (python's + // SortedDict is a good analogy) + devices[addr] = dev; +} + +bool bus_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + // Find the device with the base address closest to but + // less than addr (price-is-right search) + auto it = devices.upper_bound(addr); + if (devices.empty() || it == devices.begin()) { + // Either the bus is empty, or there weren't + // any items with a base address <= addr + return false; + } + // Found at least one item with base address <= addr + // The iterator points to the device after this, so + // go back by one item.
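+  // For illustration only (hypothetical values, not part of the simulator):
+  // with a single device registered at base 0x1000,
+  //   std::map<reg_t, abstract_device_t*> m = {{0x1000, dev}};
+  //   auto i = m.upper_bound(0x1800);  // first key > 0x1800, i.e. m.end()
+  //   --i;                             // now i->first == 0x1000
+  //   reg_t off = 0x1800 - i->first;   // 0x800, the device-relative offset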
+ it--; + return it->second->load(addr - it->first, len, bytes); +} + +bool bus_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + // See comments in bus_t::load + auto it = devices.upper_bound(addr); + if (devices.empty() || it == devices.begin()) { + return false; + } + it--; + return it->second->store(addr - it->first, len, bytes); +} + +std::pair<reg_t, abstract_device_t*> bus_t::find_device(reg_t addr) +{ + // See comments in bus_t::load + auto it = devices.upper_bound(addr); + if (devices.empty() || it == devices.begin()) { + return std::make_pair((reg_t)0, (abstract_device_t*)NULL); + } + it--; + return std::make_pair(it->first, it->second); +} + +// Type for holding all registered MMIO plugins by name. +using mmio_plugin_map_t = std::map<std::string, mmio_plugin_t>; + +// Simple singleton instance of an mmio_plugin_map_t. +static mmio_plugin_map_t& mmio_plugin_map() +{ + static mmio_plugin_map_t instance; + return instance; +} + +void register_mmio_plugin(const char* name_cstr, + const mmio_plugin_t* mmio_plugin) +{ + std::string name(name_cstr); + if (!mmio_plugin_map().emplace(name, *mmio_plugin).second) { + throw std::runtime_error("Plugin \"" + name + "\" already registered!"); + } +} + +mmio_plugin_device_t::mmio_plugin_device_t(const std::string& name, + const std::string& args) + : plugin(mmio_plugin_map().at(name)), user_data((*plugin.alloc)(args.c_str())) +{ +} + +mmio_plugin_device_t::~mmio_plugin_device_t() +{ + (*plugin.dealloc)(user_data); +} + +bool mmio_plugin_device_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + return (*plugin.load)(user_data, addr, len, bytes); +} + +bool mmio_plugin_device_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + return (*plugin.store)(user_data, addr, len, bytes); +} + +mem_t::mem_t(reg_t size) + : sz(size) +{ + if (size == 0 || size % PGSIZE != 0) + throw std::runtime_error("memory size must be a positive multiple of 4 KiB"); +} + +mem_t::~mem_t() +{ + for (auto& entry : sparse_memory_map) + free(entry.second); +} + +bool mem_t::load_store(reg_t addr, size_t len, uint8_t* bytes, bool store) +{ + if (addr + len < addr || addr + len > sz) + return false; + + while (len > 0) { + auto n = std::min(PGSIZE - (addr % PGSIZE), reg_t(len)); + + if (store) + memcpy(this->contents(addr), bytes, n); + else + memcpy(bytes, this->contents(addr), n); + + addr += n; + bytes += n; + len -= n; + } + + return true; +} + +char* mem_t::contents(reg_t addr) { + reg_t ppn = addr >> PGSHIFT, pgoff = addr % PGSIZE; + auto search = sparse_memory_map.find(ppn); + if (search == sparse_memory_map.end()) { + auto res = (char*)calloc(PGSIZE, 1); + if (res == nullptr) + throw std::bad_alloc(); + sparse_memory_map[ppn] = res; + return res + pgoff; + } + return search->second + pgoff; +} diff --git a/vendor/riscv-isa-sim/riscv/devices.h b/vendor/riscv-isa-sim/riscv/devices.h new file mode 100644 index 00000000..9200f29b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/devices.h @@ -0,0 +1,87 @@ +#ifndef _RISCV_DEVICES_H +#define _RISCV_DEVICES_H + +#include "decode.h" +#include "mmio_plugin.h" +#include "abstract_device.h" +#include "platform.h" +#include <map> +#include <string> +#include <vector> + +class processor_t; + +class bus_t : public abstract_device_t { + public: + bool load(reg_t addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + void add_device(reg_t addr, abstract_device_t* dev); + + std::pair<reg_t, abstract_device_t*> find_device(reg_t addr); + + private: + std::map<reg_t, abstract_device_t*> devices; +}; + +class rom_device_t : public abstract_device_t { + public: + rom_device_t(std::vector<char> data); + bool load(reg_t
addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + const std::vector<char>& contents() { return data; } + private: + std::vector<char> data; +}; + +class mem_t : public abstract_device_t { + public: + mem_t(reg_t size); + mem_t(const mem_t& that) = delete; + ~mem_t(); + + bool load(reg_t addr, size_t len, uint8_t* bytes) { return load_store(addr, len, bytes, false); } + bool store(reg_t addr, size_t len, const uint8_t* bytes) { return load_store(addr, len, const_cast<uint8_t*>(bytes), true); } + char* contents(reg_t addr); + reg_t size() { return sz; } + + private: + bool load_store(reg_t addr, size_t len, uint8_t* bytes, bool store); + + std::map<reg_t, char*> sparse_memory_map; + reg_t sz; +}; + +class clint_t : public abstract_device_t { + public: + clint_t(std::vector<processor_t*>&, uint64_t freq_hz, bool real_time); + bool load(reg_t addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + size_t size() { return CLINT_SIZE; } + void increment(reg_t inc); + private: + typedef uint64_t mtime_t; + typedef uint64_t mtimecmp_t; + typedef uint32_t msip_t; + std::vector<processor_t*>& procs; + uint64_t freq_hz; + bool real_time; + uint64_t real_time_ref_secs; + uint64_t real_time_ref_usecs; + mtime_t mtime; + std::vector<mtimecmp_t> mtimecmp; +}; + +class mmio_plugin_device_t : public abstract_device_t { + public: + mmio_plugin_device_t(const std::string& name, const std::string& args); + virtual ~mmio_plugin_device_t() override; + + virtual bool load(reg_t addr, size_t len, uint8_t* bytes) override; + virtual bool store(reg_t addr, size_t len, const uint8_t* bytes) override; + + private: + mmio_plugin_t plugin; + void* user_data; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/disasm.h b/vendor/riscv-isa-sim/riscv/disasm.h new file mode 100644 index 00000000..338cac24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/disasm.h @@ -0,0 +1,109 @@ +// See LICENSE for license details.
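+// Lookup in this header hinges on the (bits & mask) == match test implemented
+// by disasm_insn_t::operator== below. A hand-worked example (the instruction
+// word is illustrative, not taken from this file): "addi a0, a0, 1" encodes
+// as 0x00150513, and 0x00150513 & MASK_ADDI (0x707f) yields 0x13, which
+// equals MATCH_ADDI, so the word compares equal to the "addi" table entry.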
+ +#ifndef _RISCV_DISASM_H +#define _RISCV_DISASM_H + +#include "decode.h" +#include "isa_parser.h" +#include <string> +#include <sstream> +#include <vector> +#include <algorithm> + +extern const char* xpr_name[NXPR]; +extern const char* fpr_name[NFPR]; +extern const char* vr_name[NVPR]; +extern const char* csr_name(int which); + +class arg_t +{ + public: + virtual std::string to_string(insn_t val) const = 0; + virtual ~arg_t() {} +}; + +class disasm_insn_t +{ + public: + NOINLINE disasm_insn_t(const char* name_, uint32_t match, uint32_t mask, + const std::vector<const arg_t*>& args) + : match(match), mask(mask), args(args) + { + name = name_; + std::replace(name.begin(), name.end(), '_', '.'); + } + + bool operator == (insn_t insn) const + { + return (insn.bits() & mask) == match; + } + + const char* get_name() const + { + return name.c_str(); + } + + std::string to_string(insn_t insn) const + { + std::string s(name); + + if (args.size()) + { + bool next_arg_optional = false; + s += std::string(std::max(1, 8 - int(name.size())), ' '); + for (size_t i = 0; i < args.size(); i++) { + if (args[i] == nullptr) { + next_arg_optional = true; + continue; + } + std::string argString = args[i]->to_string(insn); + if (next_arg_optional) { + next_arg_optional = false; + if (argString.empty()) continue; + } + if (i != 0) s += ", "; + s += argString; + } + } + return s; + } + + uint32_t get_match() const { return match; } + uint32_t get_mask() const { return mask; } + + private: + uint32_t match; + uint32_t mask; + std::vector<const arg_t*> args; + std::string name; +}; + +class disassembler_t +{ + public: + disassembler_t(const isa_parser_t *isa); + ~disassembler_t(); + + std::string disassemble(insn_t insn) const; + const disasm_insn_t* lookup(insn_t insn) const; + + void add_insn(disasm_insn_t* insn); + + private: + static const int HASH_SIZE = 255; + std::vector<const disasm_insn_t*> chain[HASH_SIZE+1]; + + void add_instructions(const isa_parser_t* isa); + + const disasm_insn_t* probe_once(insn_t insn, size_t idx) const; + + static const unsigned int MASK1 = 0x7f; + static const unsigned int MASK2 = 0xe003; + + static unsigned int hash(insn_bits_t insn, unsigned int mask) + { + return (insn & mask) % HASH_SIZE; + } +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/dts.cc b/vendor/riscv-isa-sim/riscv/dts.cc new file mode 100644 index 00000000..6b47c764 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/dts.cc @@ -0,0 +1,327 @@ +// See LICENSE for license details.
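+// For orientation, make_dts() below emits device-tree source of roughly this
+// shape (abridged sketch; the exact nodes and values depend on the arguments
+// passed in):
+//
+//   /dts-v1/;
+//   / {
+//     chosen { bootargs = "console=hvc0 earlycon=sbi"; };
+//     cpus { timebase-frequency = <...>; CPU0: cpu@0 { ... }; };
+//     memory@... { device_type = "memory"; reg = <...>; };
+//     soc { clint@... { compatible = "riscv,clint0"; ... }; };
+//   };
+//
+// dts_compile() then pipes this text through `dtc -O dtb` to obtain the
+// binary device-tree blob.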
+ +#include "dts.h" +#include "libfdt.h" +#include "platform.h" +#include <assert.h> +#include <errno.h> +#include <iostream> +#include <sstream> +#include <string.h> +#include <unistd.h> +#include <sys/wait.h> + +std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz, + reg_t initrd_start, reg_t initrd_end, + const char* bootargs, + std::vector<processor_t*> procs, + std::vector<std::pair<reg_t, mem_t*>> mems) +{ + std::stringstream s; + s << std::dec << + "/dts-v1/;\n" + "\n" + "/ {\n" + " #address-cells = <2>;\n" + " #size-cells = <2>;\n" + " compatible = \"ucbbar,spike-bare-dev\";\n" + " model = \"ucbbar,spike-bare\";\n" + " chosen {\n"; + if (initrd_start < initrd_end) { + s << " linux,initrd-start = <" << (size_t)initrd_start << ">;\n" + " linux,initrd-end = <" << (size_t)initrd_end << ">;\n"; + if (!bootargs) + bootargs = "root=/dev/ram console=hvc0 earlycon=sbi"; + } else { + if (!bootargs) + bootargs = "console=hvc0 earlycon=sbi"; + } + s << " bootargs = \""; + for (size_t i = 0; i < strlen(bootargs); i++) { + if (bootargs[i] == '"') + s << '\\' << bootargs[i]; + else + s << bootargs[i]; + } + s << "\";\n"; + s << " };\n" + " cpus {\n" + " #address-cells = <1>;\n" + " #size-cells = <0>;\n" + " timebase-frequency = <" << (cpu_hz/insns_per_rtc_tick) << ">;\n"; + for (size_t i = 0; i < procs.size(); i++) { + s << " CPU" << i << ": cpu@" << i << " {\n" + " device_type = \"cpu\";\n" + " reg = <" << i << ">;\n" + " status = \"okay\";\n" + " compatible = \"riscv\";\n" + " riscv,isa = \"" << procs[i]->get_isa().get_isa_string() << "\";\n" + " mmu-type = \"riscv," << (procs[i]->get_isa().get_max_xlen() <= 32 ? "sv32" : "sv57") << "\";\n" + " riscv,pmpregions = <16>;\n" + " riscv,pmpgranularity = <4>;\n" + " clock-frequency = <" << cpu_hz << ">;\n" + " CPU" << i << "_intc: interrupt-controller {\n" + " #address-cells = <2>;\n" + " #interrupt-cells = <1>;\n" + " interrupt-controller;\n" + " compatible = \"riscv,cpu-intc\";\n" + " };\n" + " };\n"; + } + s << " };\n"; + for (auto& m : mems) { + s << std::hex << + " memory@" << m.first << " {\n" + " device_type = \"memory\";\n" + " reg = <0x" << (m.first >> 32) << " 0x" << (m.first & (uint32_t)-1) << + " 0x" << (m.second->size() >> 16 >> 16) << " 0x" << (m.second->size() & (uint32_t)-1) << ">;\n" + " };\n"; + } + s << " soc {\n" + " #address-cells = <2>;\n" + " #size-cells = <2>;\n" + " compatible = \"ucbbar,spike-bare-soc\", \"simple-bus\";\n" + " ranges;\n" + " clint@" << CLINT_BASE << " {\n" + " compatible = \"riscv,clint0\";\n" + " interrupts-extended = <" << std::dec; + for (size_t i = 0; i < procs.size(); i++) + s << "&CPU" << i << "_intc 3 &CPU" << i << "_intc 7 "; + reg_t clintbs = CLINT_BASE; + reg_t clintsz = CLINT_SIZE; + s << std::hex << ">;\n" + " reg = <0x" << (clintbs >> 32) << " 0x" << (clintbs & (uint32_t)-1) << + " 0x" << (clintsz >> 32) << " 0x" << (clintsz & (uint32_t)-1) << ">;\n" + " };\n" + " };\n" + " htif {\n" + " compatible = \"ucb,htif0\";\n" + " };\n" + "};\n"; + return s.str(); +} + +std::string dts_compile(const std::string& dts) +{ + // Convert the DTS to DTB + int dts_pipe[2]; + pid_t dts_pid; + + fflush(NULL); // flush stdout/stderr before forking + if (pipe(dts_pipe) != 0 || (dts_pid = fork()) < 0) { + std::cerr << "Failed to fork dts child: " << strerror(errno) << std::endl; + exit(1); + } + + // Child process to output dts + if (dts_pid == 0) { + close(dts_pipe[0]); + int step, len = dts.length(); + const char *buf = dts.c_str(); + for (int done = 0; done < len; done += step) { + step = write(dts_pipe[1], buf+done, len-done); + if (step == -1) { + std::cerr << "Failed to write dts: " << strerror(errno) << std::endl;
+ exit(1); + } + } + close(dts_pipe[1]); + exit(0); + } + + pid_t dtb_pid; + int dtb_pipe[2]; + if (pipe(dtb_pipe) != 0 || (dtb_pid = fork()) < 0) { + std::cerr << "Failed to fork dtb child: " << strerror(errno) << std::endl; + exit(1); + } + + // Child process to output dtb + if (dtb_pid == 0) { + dup2(dts_pipe[0], 0); + dup2(dtb_pipe[1], 1); + close(dts_pipe[0]); + close(dts_pipe[1]); + close(dtb_pipe[0]); + close(dtb_pipe[1]); + execlp(DTC, DTC, "-O", "dtb", 0); + std::cerr << "Failed to run " DTC ": " << strerror(errno) << std::endl; + exit(1); + } + + close(dts_pipe[1]); + close(dts_pipe[0]); + close(dtb_pipe[1]); + + // Read-out dtb + std::stringstream dtb; + + int got; + char buf[4096]; + while ((got = read(dtb_pipe[0], buf, sizeof(buf))) > 0) { + dtb.write(buf, got); + } + if (got == -1) { + std::cerr << "Failed to read dtb: " << strerror(errno) << std::endl; + exit(1); + } + close(dtb_pipe[0]); + + // Reap children + int status; + waitpid(dts_pid, &status, 0); + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + std::cerr << "Child dts process failed" << std::endl; + exit(1); + } + waitpid(dtb_pid, &status, 0); + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + std::cerr << "Child dtb process failed" << std::endl; + exit(1); + } + + return dtb.str(); +} + + +static int fdt_get_node_addr_size(void *fdt, int node, reg_t *addr, + unsigned long *size, const char *field) +{ + int parent, len, i; + int cell_addr, cell_size; + const fdt32_t *prop_addr, *prop_size; + uint64_t temp = 0; + + parent = fdt_parent_offset(fdt, node); + if (parent < 0) + return parent; + + cell_addr = fdt_address_cells(fdt, parent); + if (cell_addr < 1) + return -ENODEV; + + cell_size = fdt_size_cells(fdt, parent); + if (cell_size < 0) + return -ENODEV; + + if (!field) + return -ENODEV; + + prop_addr = (fdt32_t *)fdt_getprop(fdt, node, field, &len); + if (!prop_addr) + return -ENODEV; + prop_size = prop_addr + cell_addr; + + if (addr) { + for (i = 0; i < cell_addr; i++) + temp = (temp << 32) | fdt32_to_cpu(*prop_addr++); + *addr = temp; + } + temp = 0; + + if (size) { + for (i = 0; i < cell_size; i++) + temp = (temp << 32) | fdt32_to_cpu(*prop_size++); + *size = temp; + } + + return 0; +} + +static int check_cpu_node(void *fdt, int cpu_offset) +{ + int len; + const void *prop; + + if (!fdt || cpu_offset < 0) + return -EINVAL; + + prop = fdt_getprop(fdt, cpu_offset, "device_type", &len); + if (!prop || !len) + return -EINVAL; + if (strncmp ((char *)prop, "cpu", strlen ("cpu"))) + return -EINVAL; + + return 0; +} + + +int fdt_get_offset(void *fdt, const char *field) +{ + return fdt_path_offset(fdt, field); +} + +int fdt_get_first_subnode(void *fdt, int node) +{ + return fdt_first_subnode(fdt, node); +} + +int fdt_get_next_subnode(void *fdt, int node) +{ + return fdt_next_subnode(fdt, node); +} + +int fdt_parse_clint(void *fdt, reg_t *clint_addr, + const char *compatible) +{ + int nodeoffset, rc; + + nodeoffset = fdt_node_offset_by_compatible(fdt, -1, compatible); + if (nodeoffset < 0) + return nodeoffset; + + rc = fdt_get_node_addr_size(fdt, nodeoffset, clint_addr, NULL, "reg"); + if (rc < 0 || !clint_addr) + return -ENODEV; + + return 0; +} + +int fdt_parse_pmp_num(void *fdt, int cpu_offset, reg_t *pmp_num) +{ + int rc; + + if ((rc = check_cpu_node(fdt, cpu_offset)) < 0) + return rc; + + rc = fdt_get_node_addr_size(fdt, cpu_offset, pmp_num, NULL, + "riscv,pmpregions"); + if (rc < 0 || !pmp_num) + return -ENODEV; + + return 0; +} + +int fdt_parse_pmp_alignment(void *fdt, int cpu_offset, reg_t 
*pmp_align) +{ + int rc; + + if ((rc = check_cpu_node(fdt, cpu_offset)) < 0) + return rc; + + rc = fdt_get_node_addr_size(fdt, cpu_offset, pmp_align, NULL, + "riscv,pmpgranularity"); + if (rc < 0 || !pmp_align) + return -ENODEV; + + return 0; +} + +int fdt_parse_mmu_type(void *fdt, int cpu_offset, const char **mmu_type) +{ + assert(mmu_type); + + int len, rc; + const void *prop; + + if ((rc = check_cpu_node(fdt, cpu_offset)) < 0) + return rc; + + prop = fdt_getprop(fdt, cpu_offset, "mmu-type", &len); + if (!prop || !len) + return -EINVAL; + + *mmu_type = (const char *)prop; + + return 0; +} diff --git a/vendor/riscv-isa-sim/riscv/dts.h b/vendor/riscv-isa-sim/riscv/dts.h new file mode 100644 index 00000000..62081511 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/dts.h @@ -0,0 +1,27 @@ +// See LICENSE for license details. +#ifndef _RISCV_DTS_H +#define _RISCV_DTS_H + +#include "devices.h" +#include "processor.h" +#include "mmu.h" +#include <string> + +std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz, + reg_t initrd_start, reg_t initrd_end, + const char* bootargs, + std::vector<processor_t*> procs, + std::vector<std::pair<reg_t, mem_t*>> mems); + +std::string dts_compile(const std::string& dts); + +int fdt_get_offset(void *fdt, const char *field); +int fdt_get_first_subnode(void *fdt, int node); +int fdt_get_next_subnode(void *fdt, int node); + +int fdt_parse_clint(void *fdt, reg_t *clint_addr, + const char *compatible); +int fdt_parse_pmp_num(void *fdt, int cpu_offset, reg_t *pmp_num); +int fdt_parse_pmp_alignment(void *fdt, int cpu_offset, reg_t *pmp_align); +int fdt_parse_mmu_type(void *fdt, int cpu_offset, const char **mmu_type); +#endif diff --git a/vendor/riscv-isa-sim/riscv/encoding.h b/vendor/riscv-isa-sim/riscv/encoding.h new file mode 100644 index 00000000..e6dbd7c0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/encoding.h @@ -0,0 +1,4810 @@ +/* + * This file is auto-generated by running 'make' in + * https://github.com/riscv/riscv-opcodes (d2b9aea) + */ + +/* See LICENSE for license details.
*/ + +#ifndef RISCV_CSR_ENCODING_H +#define RISCV_CSR_ENCODING_H + +#define MSTATUS_UIE 0x00000001 +#define MSTATUS_SIE 0x00000002 +#define MSTATUS_HIE 0x00000004 +#define MSTATUS_MIE 0x00000008 +#define MSTATUS_UPIE 0x00000010 +#define MSTATUS_SPIE 0x00000020 +#define MSTATUS_UBE 0x00000040 +#define MSTATUS_MPIE 0x00000080 +#define MSTATUS_SPP 0x00000100 +#define MSTATUS_VS 0x00000600 +#define MSTATUS_MPP 0x00001800 +#define MSTATUS_FS 0x00006000 +#define MSTATUS_XS 0x00018000 +#define MSTATUS_MPRV 0x00020000 +#define MSTATUS_SUM 0x00040000 +#define MSTATUS_MXR 0x00080000 +#define MSTATUS_TVM 0x00100000 +#define MSTATUS_TW 0x00200000 +#define MSTATUS_TSR 0x00400000 +#define MSTATUS32_SD 0x80000000 +#define MSTATUS_UXL 0x0000000300000000 +#define MSTATUS_SXL 0x0000000C00000000 +#define MSTATUS_SBE 0x0000001000000000 +#define MSTATUS_MBE 0x0000002000000000 +#define MSTATUS_GVA 0x0000004000000000 +#define MSTATUS_MPV 0x0000008000000000 +#define MSTATUS64_SD 0x8000000000000000 + +#define MSTATUSH_SBE 0x00000010 +#define MSTATUSH_MBE 0x00000020 +#define MSTATUSH_GVA 0x00000040 +#define MSTATUSH_MPV 0x00000080 + +#define SSTATUS_UIE 0x00000001 +#define SSTATUS_SIE 0x00000002 +#define SSTATUS_UPIE 0x00000010 +#define SSTATUS_SPIE 0x00000020 +#define SSTATUS_UBE 0x00000040 +#define SSTATUS_SPP 0x00000100 +#define SSTATUS_VS 0x00000600 +#define SSTATUS_FS 0x00006000 +#define SSTATUS_XS 0x00018000 +#define SSTATUS_SUM 0x00040000 +#define SSTATUS_MXR 0x00080000 +#define SSTATUS32_SD 0x80000000 +#define SSTATUS_UXL 0x0000000300000000 +#define SSTATUS64_SD 0x8000000000000000 + +#define HSTATUS_VSXL 0x300000000 +#define HSTATUS_VTSR 0x00400000 +#define HSTATUS_VTW 0x00200000 +#define HSTATUS_VTVM 0x00100000 +#define HSTATUS_VGEIN 0x0003f000 +#define HSTATUS_HU 0x00000200 +#define HSTATUS_SPVP 0x00000100 +#define HSTATUS_SPV 0x00000080 +#define HSTATUS_GVA 0x00000040 +#define HSTATUS_VSBE 0x00000020 + +#define USTATUS_UIE 0x00000001 +#define USTATUS_UPIE 0x00000010 + +#define DCSR_XDEBUGVER (3U<<30) +#define DCSR_NDRESET (1<<29) +#define DCSR_FULLRESET (1<<28) +#define DCSR_EBREAKM (1<<15) +#define DCSR_EBREAKH (1<<14) +#define DCSR_EBREAKS (1<<13) +#define DCSR_EBREAKU (1<<12) +#define DCSR_STOPCYCLE (1<<10) +#define DCSR_STOPTIME (1<<9) +#define DCSR_CAUSE (7<<6) +#define DCSR_DEBUGINT (1<<5) +#define DCSR_HALT (1<<3) +#define DCSR_STEP (1<<2) +#define DCSR_PRV (3<<0) + +#define DCSR_CAUSE_NONE 0 +#define DCSR_CAUSE_SWBP 1 +#define DCSR_CAUSE_HWBP 2 +#define DCSR_CAUSE_DEBUGINT 3 +#define DCSR_CAUSE_STEP 4 +#define DCSR_CAUSE_HALT 5 +#define DCSR_CAUSE_GROUP 6 + +#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4)) +#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5)) +#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11)) + +#define MCONTROL_SELECT (1<<19) +#define MCONTROL_TIMING (1<<18) +#define MCONTROL_ACTION (0x3f<<12) +#define MCONTROL_CHAIN (1<<11) +#define MCONTROL_MATCH (0xf<<7) +#define MCONTROL_M (1<<6) +#define MCONTROL_H (1<<5) +#define MCONTROL_S (1<<4) +#define MCONTROL_U (1<<3) +#define MCONTROL_EXECUTE (1<<2) +#define MCONTROL_STORE (1<<1) +#define MCONTROL_LOAD (1<<0) + +#define MCONTROL_TYPE_NONE 0 +#define MCONTROL_TYPE_MATCH 2 + +#define MCONTROL_ACTION_DEBUG_EXCEPTION 0 +#define MCONTROL_ACTION_DEBUG_MODE 1 +#define MCONTROL_ACTION_TRACE_START 2 +#define MCONTROL_ACTION_TRACE_STOP 3 +#define MCONTROL_ACTION_TRACE_EMIT 4 + +#define MCONTROL_MATCH_EQUAL 0 +#define MCONTROL_MATCH_NAPOT 1 +#define MCONTROL_MATCH_GE 2 +#define MCONTROL_MATCH_LT 3 +#define MCONTROL_MATCH_MASK_LOW 4 +#define 
MCONTROL_MATCH_MASK_HIGH 5 + +#define MIP_USIP (1 << IRQ_U_SOFT) +#define MIP_SSIP (1 << IRQ_S_SOFT) +#define MIP_VSSIP (1 << IRQ_VS_SOFT) +#define MIP_MSIP (1 << IRQ_M_SOFT) +#define MIP_UTIP (1 << IRQ_U_TIMER) +#define MIP_STIP (1 << IRQ_S_TIMER) +#define MIP_VSTIP (1 << IRQ_VS_TIMER) +#define MIP_MTIP (1 << IRQ_M_TIMER) +#define MIP_UEIP (1 << IRQ_U_EXT) +#define MIP_SEIP (1 << IRQ_S_EXT) +#define MIP_VSEIP (1 << IRQ_VS_EXT) +#define MIP_MEIP (1 << IRQ_M_EXT) +#define MIP_SGEIP (1 << IRQ_S_GEXT) + +#define MIP_S_MASK (MIP_SSIP | MIP_STIP | MIP_SEIP) +#define MIP_VS_MASK (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) +#define MIP_HS_MASK (MIP_VS_MASK | MIP_SGEIP) + +#define MIDELEG_FORCED_MASK MIP_HS_MASK + +#define SIP_SSIP MIP_SSIP +#define SIP_STIP MIP_STIP + +#define MENVCFG_FIOM 0x00000001 +#define MENVCFG_CBIE 0x00000030 +#define MENVCFG_CBCFE 0x00000040 +#define MENVCFG_CBZE 0x00000080 +#define MENVCFG_PBMTE 0x4000000000000000 +#define MENVCFG_STCE 0x8000000000000000 + +#define MENVCFGH_PBMTE 0x40000000 +#define MENVCFGH_STCE 0x80000000 + +#define HENVCFG_FIOM 0x00000001 +#define HENVCFG_CBIE 0x00000030 +#define HENVCFG_CBCFE 0x00000040 +#define HENVCFG_CBZE 0x00000080 +#define HENVCFG_PBMTE 0x4000000000000000 +#define HENVCFG_STCE 0x8000000000000000 + +#define HENVCFGH_PBMTE 0x40000000 +#define HENVCFGH_STCE 0x80000000 + +#define SENVCFG_FIOM 0x00000001 +#define SENVCFG_CBIE 0x00000030 +#define SENVCFG_CBCFE 0x00000040 +#define SENVCFG_CBZE 0x00000080 + +#define MSECCFG_MML 0x00000001 +#define MSECCFG_MMWP 0x00000002 +#define MSECCFG_RLB 0x00000004 +#define MSECCFG_USEED 0x00000100 +#define MSECCFG_SSEED 0x00000200 + +#define PRV_U 0 +#define PRV_S 1 +#define PRV_M 3 + +#define PRV_HS (PRV_S + 1) + +#define SATP32_MODE 0x80000000 +#define SATP32_ASID 0x7FC00000 +#define SATP32_PPN 0x003FFFFF +#define SATP64_MODE 0xF000000000000000 +#define SATP64_ASID 0x0FFFF00000000000 +#define SATP64_PPN 0x00000FFFFFFFFFFF + +#define SATP_MODE_OFF 0 +#define SATP_MODE_SV32 1 +#define SATP_MODE_SV39 8 +#define SATP_MODE_SV48 9 +#define SATP_MODE_SV57 10 +#define SATP_MODE_SV64 11 + +#define HGATP32_MODE 0x80000000 +#define HGATP32_VMID 0x1FC00000 +#define HGATP32_PPN 0x003FFFFF + +#define HGATP64_MODE 0xF000000000000000 +#define HGATP64_VMID 0x03FFF00000000000 +#define HGATP64_PPN 0x00000FFFFFFFFFFF + +#define HGATP_MODE_OFF 0 +#define HGATP_MODE_SV32X4 1 +#define HGATP_MODE_SV39X4 8 +#define HGATP_MODE_SV48X4 9 +#define HGATP_MODE_SV57X4 10 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define IRQ_U_SOFT 0 +#define IRQ_S_SOFT 1 +#define IRQ_VS_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_U_TIMER 4 +#define IRQ_S_TIMER 5 +#define IRQ_VS_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_U_EXT 8 +#define IRQ_S_EXT 9 +#define IRQ_VS_EXT 10 +#define IRQ_M_EXT 11 +#define IRQ_S_GEXT 12 +#define IRQ_COP 12 +#define IRQ_HOST 13 + +/* page table entry (PTE) fields */ +#define PTE_V 0x001 /* Valid */ +#define PTE_R 0x002 /* Read */ +#define PTE_W 0x004 /* Write */ +#define PTE_X 0x008 /* Execute */ +#define PTE_U 0x010 /* User */ +#define PTE_G 0x020 /* Global */ +#define PTE_A 0x040 /* Accessed */ +#define PTE_D 0x080 /* Dirty */ +#define PTE_SOFT 0x300 /* Reserved for Software */ +#define PTE_RSVD 0x1FC0000000000000 /* Reserved for future standard use */ +#define PTE_PBMT 0x6000000000000000 /* Svpbmt: Page-based memory types */ +#define PTE_N 0x8000000000000000 /* Svnapot: 
NAPOT translation contiguity */ +#define PTE_ATTR 0xFFC0000000000000 /* All attributes and reserved bits */ + +#define PTE_PPN_SHIFT 10 + +#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V) + +#ifdef __riscv + +#if __riscv_xlen == 64 +# define MSTATUS_SD MSTATUS64_SD +# define SSTATUS_SD SSTATUS64_SD +# define RISCV_PGLEVEL_BITS 9 +# define SATP_MODE SATP64_MODE +#else +# define MSTATUS_SD MSTATUS32_SD +# define SSTATUS_SD SSTATUS32_SD +# define RISCV_PGLEVEL_BITS 10 +# define SATP_MODE SATP32_MODE +#endif +#define RISCV_PGSHIFT 12 +#define RISCV_PGSIZE (1 << RISCV_PGSHIFT) + +#ifndef __ASSEMBLER__ + +#ifdef __GNUC__ + +#define read_csr(reg) ({ unsigned long __tmp; \ + asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \ + __tmp; }) + +#define write_csr(reg, val) ({ \ + asm volatile ("csrw " #reg ", %0" :: "rK"(val)); }) + +#define swap_csr(reg, val) ({ unsigned long __tmp; \ + asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "rK"(val)); \ + __tmp; }) + +#define set_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define clear_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define rdtime() read_csr(time) +#define rdcycle() read_csr(cycle) +#define rdinstret() read_csr(instret) + +#endif + +#endif + +#endif + +#endif +/* Automatically generated by parse_opcodes. */ +#ifndef RISCV_ENCODING_H +#define RISCV_ENCODING_H +#define MATCH_SLLI_RV32 0x1013 +#define MASK_SLLI_RV32 0xfe00707f +#define MATCH_SRLI_RV32 0x5013 +#define MASK_SRLI_RV32 0xfe00707f +#define MATCH_SRAI_RV32 0x40005013 +#define MASK_SRAI_RV32 0xfe00707f +#define MATCH_FRFLAGS 0x102073 +#define MASK_FRFLAGS 0xfffff07f +#define MATCH_FSFLAGS 0x101073 +#define MASK_FSFLAGS 0xfff0707f +#define MATCH_FSFLAGSI 0x105073 +#define MASK_FSFLAGSI 0xfff0707f +#define MATCH_FRRM 0x202073 +#define MASK_FRRM 0xfffff07f +#define MATCH_FSRM 0x201073 +#define MASK_FSRM 0xfff0707f +#define MATCH_FSRMI 0x205073 +#define MASK_FSRMI 0xfff0707f +#define MATCH_FSCSR 0x301073 +#define MASK_FSCSR 0xfff0707f +#define MATCH_FRCSR 0x302073 +#define MASK_FRCSR 0xfffff07f +#define MATCH_RDCYCLE 0xc0002073 +#define MASK_RDCYCLE 0xfffff07f +#define MATCH_RDTIME 0xc0102073 +#define MASK_RDTIME 0xfffff07f +#define MATCH_RDINSTRET 0xc0202073 +#define MASK_RDINSTRET 0xfffff07f +#define MATCH_RDCYCLEH 0xc8002073 +#define MASK_RDCYCLEH 0xfffff07f +#define MATCH_RDTIMEH 0xc8102073 +#define MASK_RDTIMEH 0xfffff07f +#define MATCH_RDINSTRETH 0xc8202073 +#define MASK_RDINSTRETH 0xfffff07f +#define MATCH_SCALL 0x73 +#define MASK_SCALL 0xffffffff +#define MATCH_SBREAK 0x100073 +#define MASK_SBREAK 0xffffffff +#define MATCH_FMV_X_S 0xe0000053 +#define MASK_FMV_X_S 0xfff0707f +#define MATCH_FMV_S_X 0xf0000053 +#define MASK_FMV_S_X 0xfff0707f +#define MATCH_FENCE_TSO 0x8330000f +#define MASK_FENCE_TSO 0xfff0707f +#define MATCH_PAUSE 0x100000f +#define MASK_PAUSE 0xffffffff +#define MATCH_BEQ 0x63 +#define MASK_BEQ 0x707f +#define MATCH_BNE 0x1063 +#define MASK_BNE 0x707f +#define MATCH_BLT 0x4063 +#define MASK_BLT 0x707f +#define MATCH_BGE 0x5063 +#define MASK_BGE 0x707f +#define MATCH_BLTU 0x6063 +#define MASK_BLTU 0x707f +#define MATCH_BGEU 0x7063 +#define MASK_BGEU 0x707f +#define MATCH_JALR 0x67 +#define MASK_JALR 0x707f +#define MATCH_JAL 0x6f +#define MASK_JAL 0x7f +#define MATCH_LUI 0x37 +#define MASK_LUI 0x7f +#define MATCH_AUIPC 0x17 +#define MASK_AUIPC 0x7f +#define MATCH_ADDI 0x13 
+#define MASK_ADDI 0x707f +#define MATCH_SLTI 0x2013 +#define MASK_SLTI 0x707f +#define MATCH_SLTIU 0x3013 +#define MASK_SLTIU 0x707f +#define MATCH_XORI 0x4013 +#define MASK_XORI 0x707f +#define MATCH_ORI 0x6013 +#define MASK_ORI 0x707f +#define MATCH_ANDI 0x7013 +#define MASK_ANDI 0x707f +#define MATCH_ADD 0x33 +#define MASK_ADD 0xfe00707f +#define MATCH_SUB 0x40000033 +#define MASK_SUB 0xfe00707f +#define MATCH_SLL 0x1033 +#define MASK_SLL 0xfe00707f +#define MATCH_SLT 0x2033 +#define MASK_SLT 0xfe00707f +#define MATCH_SLTU 0x3033 +#define MASK_SLTU 0xfe00707f +#define MATCH_XOR 0x4033 +#define MASK_XOR 0xfe00707f +#define MATCH_SRL 0x5033 +#define MASK_SRL 0xfe00707f +#define MATCH_SRA 0x40005033 +#define MASK_SRA 0xfe00707f +#define MATCH_OR 0x6033 +#define MASK_OR 0xfe00707f +#define MATCH_AND 0x7033 +#define MASK_AND 0xfe00707f +#define MATCH_LB 0x3 +#define MASK_LB 0x707f +#define MATCH_LH 0x1003 +#define MASK_LH 0x707f +#define MATCH_LW 0x2003 +#define MASK_LW 0x707f +#define MATCH_LBU 0x4003 +#define MASK_LBU 0x707f +#define MATCH_LHU 0x5003 +#define MASK_LHU 0x707f +#define MATCH_SB 0x23 +#define MASK_SB 0x707f +#define MATCH_SH 0x1023 +#define MASK_SH 0x707f +#define MATCH_SW 0x2023 +#define MASK_SW 0x707f +#define MATCH_FENCE 0xf +#define MASK_FENCE 0x707f +#define MATCH_FENCE_I 0x100f +#define MASK_FENCE_I 0x707f +#define MATCH_ADDIW 0x1b +#define MASK_ADDIW 0x707f +#define MATCH_SLLIW 0x101b +#define MASK_SLLIW 0xfe00707f +#define MATCH_SRLIW 0x501b +#define MASK_SRLIW 0xfe00707f +#define MATCH_SRAIW 0x4000501b +#define MASK_SRAIW 0xfe00707f +#define MATCH_ADDW 0x3b +#define MASK_ADDW 0xfe00707f +#define MATCH_SUBW 0x4000003b +#define MASK_SUBW 0xfe00707f +#define MATCH_SLLW 0x103b +#define MASK_SLLW 0xfe00707f +#define MATCH_SRLW 0x503b +#define MASK_SRLW 0xfe00707f +#define MATCH_SRAW 0x4000503b +#define MASK_SRAW 0xfe00707f +#define MATCH_LD 0x3003 +#define MASK_LD 0x707f +#define MATCH_LWU 0x6003 +#define MASK_LWU 0x707f +#define MATCH_SD 0x3023 +#define MASK_SD 0x707f +#define MATCH_SLLI 0x1013 +#define MASK_SLLI 0xfc00707f +#define MATCH_SRLI 0x5013 +#define MASK_SRLI 0xfc00707f +#define MATCH_SRAI 0x40005013 +#define MASK_SRAI 0xfc00707f +#define MATCH_MUL 0x2000033 +#define MASK_MUL 0xfe00707f +#define MATCH_MULH 0x2001033 +#define MASK_MULH 0xfe00707f +#define MATCH_MULHSU 0x2002033 +#define MASK_MULHSU 0xfe00707f +#define MATCH_MULHU 0x2003033 +#define MASK_MULHU 0xfe00707f +#define MATCH_DIV 0x2004033 +#define MASK_DIV 0xfe00707f +#define MATCH_DIVU 0x2005033 +#define MASK_DIVU 0xfe00707f +#define MATCH_REM 0x2006033 +#define MASK_REM 0xfe00707f +#define MATCH_REMU 0x2007033 +#define MASK_REMU 0xfe00707f +#define MATCH_MULW 0x200003b +#define MASK_MULW 0xfe00707f +#define MATCH_DIVW 0x200403b +#define MASK_DIVW 0xfe00707f +#define MATCH_DIVUW 0x200503b +#define MASK_DIVUW 0xfe00707f +#define MATCH_REMW 0x200603b +#define MASK_REMW 0xfe00707f +#define MATCH_REMUW 0x200703b +#define MASK_REMUW 0xfe00707f +#define MATCH_AMOADD_W 0x202f +#define MASK_AMOADD_W 0xf800707f +#define MATCH_AMOXOR_W 0x2000202f +#define MASK_AMOXOR_W 0xf800707f +#define MATCH_AMOOR_W 0x4000202f +#define MASK_AMOOR_W 0xf800707f +#define MATCH_AMOAND_W 0x6000202f +#define MASK_AMOAND_W 0xf800707f +#define MATCH_AMOMIN_W 0x8000202f +#define MASK_AMOMIN_W 0xf800707f +#define MATCH_AMOMAX_W 0xa000202f +#define MASK_AMOMAX_W 0xf800707f +#define MATCH_AMOMINU_W 0xc000202f +#define MASK_AMOMINU_W 0xf800707f +#define MATCH_AMOMAXU_W 0xe000202f +#define MASK_AMOMAXU_W 0xf800707f +#define 
MATCH_AMOSWAP_W 0x800202f +#define MASK_AMOSWAP_W 0xf800707f +#define MATCH_LR_W 0x1000202f +#define MASK_LR_W 0xf9f0707f +#define MATCH_SC_W 0x1800202f +#define MASK_SC_W 0xf800707f +#define MATCH_AMOADD_D 0x302f +#define MASK_AMOADD_D 0xf800707f +#define MATCH_AMOXOR_D 0x2000302f +#define MASK_AMOXOR_D 0xf800707f +#define MATCH_AMOOR_D 0x4000302f +#define MASK_AMOOR_D 0xf800707f +#define MATCH_AMOAND_D 0x6000302f +#define MASK_AMOAND_D 0xf800707f +#define MATCH_AMOMIN_D 0x8000302f +#define MASK_AMOMIN_D 0xf800707f +#define MATCH_AMOMAX_D 0xa000302f +#define MASK_AMOMAX_D 0xf800707f +#define MATCH_AMOMINU_D 0xc000302f +#define MASK_AMOMINU_D 0xf800707f +#define MATCH_AMOMAXU_D 0xe000302f +#define MASK_AMOMAXU_D 0xf800707f +#define MATCH_AMOSWAP_D 0x800302f +#define MASK_AMOSWAP_D 0xf800707f +#define MATCH_LR_D 0x1000302f +#define MASK_LR_D 0xf9f0707f +#define MATCH_SC_D 0x1800302f +#define MASK_SC_D 0xf800707f +#define MATCH_HFENCE_VVMA 0x22000073 +#define MASK_HFENCE_VVMA 0xfe007fff +#define MATCH_HFENCE_GVMA 0x62000073 +#define MASK_HFENCE_GVMA 0xfe007fff +#define MATCH_HLV_B 0x60004073 +#define MASK_HLV_B 0xfff0707f +#define MATCH_HLV_BU 0x60104073 +#define MASK_HLV_BU 0xfff0707f +#define MATCH_HLV_H 0x64004073 +#define MASK_HLV_H 0xfff0707f +#define MATCH_HLV_HU 0x64104073 +#define MASK_HLV_HU 0xfff0707f +#define MATCH_HLVX_HU 0x64304073 +#define MASK_HLVX_HU 0xfff0707f +#define MATCH_HLV_W 0x68004073 +#define MASK_HLV_W 0xfff0707f +#define MATCH_HLVX_WU 0x68304073 +#define MASK_HLVX_WU 0xfff0707f +#define MATCH_HSV_B 0x62004073 +#define MASK_HSV_B 0xfe007fff +#define MATCH_HSV_H 0x66004073 +#define MASK_HSV_H 0xfe007fff +#define MATCH_HSV_W 0x6a004073 +#define MASK_HSV_W 0xfe007fff +#define MATCH_HLV_WU 0x68104073 +#define MASK_HLV_WU 0xfff0707f +#define MATCH_HLV_D 0x6c004073 +#define MASK_HLV_D 0xfff0707f +#define MATCH_HSV_D 0x6e004073 +#define MASK_HSV_D 0xfe007fff +#define MATCH_FADD_S 0x53 +#define MASK_FADD_S 0xfe00007f +#define MATCH_FSUB_S 0x8000053 +#define MASK_FSUB_S 0xfe00007f +#define MATCH_FMUL_S 0x10000053 +#define MASK_FMUL_S 0xfe00007f +#define MATCH_FDIV_S 0x18000053 +#define MASK_FDIV_S 0xfe00007f +#define MATCH_FSGNJ_S 0x20000053 +#define MASK_FSGNJ_S 0xfe00707f +#define MATCH_FSGNJN_S 0x20001053 +#define MASK_FSGNJN_S 0xfe00707f +#define MATCH_FSGNJX_S 0x20002053 +#define MASK_FSGNJX_S 0xfe00707f +#define MATCH_FMIN_S 0x28000053 +#define MASK_FMIN_S 0xfe00707f +#define MATCH_FMAX_S 0x28001053 +#define MASK_FMAX_S 0xfe00707f +#define MATCH_FSQRT_S 0x58000053 +#define MASK_FSQRT_S 0xfff0007f +#define MATCH_FLE_S 0xa0000053 +#define MASK_FLE_S 0xfe00707f +#define MATCH_FLT_S 0xa0001053 +#define MASK_FLT_S 0xfe00707f +#define MATCH_FEQ_S 0xa0002053 +#define MASK_FEQ_S 0xfe00707f +#define MATCH_FCVT_W_S 0xc0000053 +#define MASK_FCVT_W_S 0xfff0007f +#define MATCH_FCVT_WU_S 0xc0100053 +#define MASK_FCVT_WU_S 0xfff0007f +#define MATCH_FMV_X_W 0xe0000053 +#define MASK_FMV_X_W 0xfff0707f +#define MATCH_FCLASS_S 0xe0001053 +#define MASK_FCLASS_S 0xfff0707f +#define MATCH_FCVT_S_W 0xd0000053 +#define MASK_FCVT_S_W 0xfff0007f +#define MATCH_FCVT_S_WU 0xd0100053 +#define MASK_FCVT_S_WU 0xfff0007f +#define MATCH_FMV_W_X 0xf0000053 +#define MASK_FMV_W_X 0xfff0707f +#define MATCH_FLW 0x2007 +#define MASK_FLW 0x707f +#define MATCH_FSW 0x2027 +#define MASK_FSW 0x707f +#define MATCH_FMADD_S 0x43 +#define MASK_FMADD_S 0x600007f +#define MATCH_FMSUB_S 0x47 +#define MASK_FMSUB_S 0x600007f +#define MATCH_FNMSUB_S 0x4b +#define MASK_FNMSUB_S 0x600007f +#define MATCH_FNMADD_S 0x4f 
+#define MASK_FNMADD_S 0x600007f +#define MATCH_FCVT_L_S 0xc0200053 +#define MASK_FCVT_L_S 0xfff0007f +#define MATCH_FCVT_LU_S 0xc0300053 +#define MASK_FCVT_LU_S 0xfff0007f +#define MATCH_FCVT_S_L 0xd0200053 +#define MASK_FCVT_S_L 0xfff0007f +#define MATCH_FCVT_S_LU 0xd0300053 +#define MASK_FCVT_S_LU 0xfff0007f +#define MATCH_FADD_D 0x2000053 +#define MASK_FADD_D 0xfe00007f +#define MATCH_FSUB_D 0xa000053 +#define MASK_FSUB_D 0xfe00007f +#define MATCH_FMUL_D 0x12000053 +#define MASK_FMUL_D 0xfe00007f +#define MATCH_FDIV_D 0x1a000053 +#define MASK_FDIV_D 0xfe00007f +#define MATCH_FSGNJ_D 0x22000053 +#define MASK_FSGNJ_D 0xfe00707f +#define MATCH_FSGNJN_D 0x22001053 +#define MASK_FSGNJN_D 0xfe00707f +#define MATCH_FSGNJX_D 0x22002053 +#define MASK_FSGNJX_D 0xfe00707f +#define MATCH_FMIN_D 0x2a000053 +#define MASK_FMIN_D 0xfe00707f +#define MATCH_FMAX_D 0x2a001053 +#define MASK_FMAX_D 0xfe00707f +#define MATCH_FCVT_S_D 0x40100053 +#define MASK_FCVT_S_D 0xfff0007f +#define MATCH_FCVT_D_S 0x42000053 +#define MASK_FCVT_D_S 0xfff0007f +#define MATCH_FSQRT_D 0x5a000053 +#define MASK_FSQRT_D 0xfff0007f +#define MATCH_FLE_D 0xa2000053 +#define MASK_FLE_D 0xfe00707f +#define MATCH_FLT_D 0xa2001053 +#define MASK_FLT_D 0xfe00707f +#define MATCH_FEQ_D 0xa2002053 +#define MASK_FEQ_D 0xfe00707f +#define MATCH_FCVT_W_D 0xc2000053 +#define MASK_FCVT_W_D 0xfff0007f +#define MATCH_FCVT_WU_D 0xc2100053 +#define MASK_FCVT_WU_D 0xfff0007f +#define MATCH_FCLASS_D 0xe2001053 +#define MASK_FCLASS_D 0xfff0707f +#define MATCH_FCVT_D_W 0xd2000053 +#define MASK_FCVT_D_W 0xfff0007f +#define MATCH_FCVT_D_WU 0xd2100053 +#define MASK_FCVT_D_WU 0xfff0007f +#define MATCH_FLD 0x3007 +#define MASK_FLD 0x707f +#define MATCH_FSD 0x3027 +#define MASK_FSD 0x707f +#define MATCH_FMADD_D 0x2000043 +#define MASK_FMADD_D 0x600007f +#define MATCH_FMSUB_D 0x2000047 +#define MASK_FMSUB_D 0x600007f +#define MATCH_FNMSUB_D 0x200004b +#define MASK_FNMSUB_D 0x600007f +#define MATCH_FNMADD_D 0x200004f +#define MASK_FNMADD_D 0x600007f +#define MATCH_FCVT_L_D 0xc2200053 +#define MASK_FCVT_L_D 0xfff0007f +#define MATCH_FCVT_LU_D 0xc2300053 +#define MASK_FCVT_LU_D 0xfff0007f +#define MATCH_FMV_X_D 0xe2000053 +#define MASK_FMV_X_D 0xfff0707f +#define MATCH_FCVT_D_L 0xd2200053 +#define MASK_FCVT_D_L 0xfff0007f +#define MATCH_FCVT_D_LU 0xd2300053 +#define MASK_FCVT_D_LU 0xfff0007f +#define MATCH_FMV_D_X 0xf2000053 +#define MASK_FMV_D_X 0xfff0707f +#define MATCH_FADD_Q 0x6000053 +#define MASK_FADD_Q 0xfe00007f +#define MATCH_FSUB_Q 0xe000053 +#define MASK_FSUB_Q 0xfe00007f +#define MATCH_FMUL_Q 0x16000053 +#define MASK_FMUL_Q 0xfe00007f +#define MATCH_FDIV_Q 0x1e000053 +#define MASK_FDIV_Q 0xfe00007f +#define MATCH_FSGNJ_Q 0x26000053 +#define MASK_FSGNJ_Q 0xfe00707f +#define MATCH_FSGNJN_Q 0x26001053 +#define MASK_FSGNJN_Q 0xfe00707f +#define MATCH_FSGNJX_Q 0x26002053 +#define MASK_FSGNJX_Q 0xfe00707f +#define MATCH_FMIN_Q 0x2e000053 +#define MASK_FMIN_Q 0xfe00707f +#define MATCH_FMAX_Q 0x2e001053 +#define MASK_FMAX_Q 0xfe00707f +#define MATCH_FCVT_S_Q 0x40300053 +#define MASK_FCVT_S_Q 0xfff0007f +#define MATCH_FCVT_Q_S 0x46000053 +#define MASK_FCVT_Q_S 0xfff0007f +#define MATCH_FCVT_D_Q 0x42300053 +#define MASK_FCVT_D_Q 0xfff0007f +#define MATCH_FCVT_Q_D 0x46100053 +#define MASK_FCVT_Q_D 0xfff0007f +#define MATCH_FSQRT_Q 0x5e000053 +#define MASK_FSQRT_Q 0xfff0007f +#define MATCH_FLE_Q 0xa6000053 +#define MASK_FLE_Q 0xfe00707f +#define MATCH_FLT_Q 0xa6001053 +#define MASK_FLT_Q 0xfe00707f +#define MATCH_FEQ_Q 0xa6002053 +#define MASK_FEQ_Q 0xfe00707f 
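[Editorial note: each opcode above is described by a MASK/MATCH pair. The mask keeps the bits that are fixed for that instruction (opcode, funct3, funct7, ...) and the match gives their required values, so a decoder recognizes an instruction by masking the 32-bit word and comparing. A minimal sketch of that test in C, not part of the vendored header, assuming the macros above are in scope:

#include <stdint.h>

/* 0x002081b3 is `add x3, x1, x2`: MASK_ADD (0xfe00707f) clears the
   rd/rs1/rs2 fields and leaves 0x33, which equals MATCH_ADD. */
static int is_add(uint32_t insn)
{
  return (insn & MASK_ADD) == MATCH_ADD;
}

Note that encodings may nest: MATCH_C_JALR and MATCH_C_EBREAK further below are both 0x9002, but MASK_C_EBREAK (0xffff) is stricter than MASK_C_JALR (0xf07f), so a table-driven decoder has to try the more specific pair first.]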
+#define MATCH_FCVT_W_Q 0xc6000053 +#define MASK_FCVT_W_Q 0xfff0007f +#define MATCH_FCVT_WU_Q 0xc6100053 +#define MASK_FCVT_WU_Q 0xfff0007f +#define MATCH_FCLASS_Q 0xe6001053 +#define MASK_FCLASS_Q 0xfff0707f +#define MATCH_FCVT_Q_W 0xd6000053 +#define MASK_FCVT_Q_W 0xfff0007f +#define MATCH_FCVT_Q_WU 0xd6100053 +#define MASK_FCVT_Q_WU 0xfff0007f +#define MATCH_FLQ 0x4007 +#define MASK_FLQ 0x707f +#define MATCH_FSQ 0x4027 +#define MASK_FSQ 0x707f +#define MATCH_FMADD_Q 0x6000043 +#define MASK_FMADD_Q 0x600007f +#define MATCH_FMSUB_Q 0x6000047 +#define MASK_FMSUB_Q 0x600007f +#define MATCH_FNMSUB_Q 0x600004b +#define MASK_FNMSUB_Q 0x600007f +#define MATCH_FNMADD_Q 0x600004f +#define MASK_FNMADD_Q 0x600007f +#define MATCH_FCVT_L_Q 0xc6200053 +#define MASK_FCVT_L_Q 0xfff0007f +#define MATCH_FCVT_LU_Q 0xc6300053 +#define MASK_FCVT_LU_Q 0xfff0007f +#define MATCH_FCVT_Q_L 0xd6200053 +#define MASK_FCVT_Q_L 0xfff0007f +#define MATCH_FCVT_Q_LU 0xd6300053 +#define MASK_FCVT_Q_LU 0xfff0007f +#define MATCH_ANDN 0x40007033 +#define MASK_ANDN 0xfe00707f +#define MATCH_ORN 0x40006033 +#define MASK_ORN 0xfe00707f +#define MATCH_XNOR 0x40004033 +#define MASK_XNOR 0xfe00707f +#define MATCH_SLO 0x20001033 +#define MASK_SLO 0xfe00707f +#define MATCH_SRO 0x20005033 +#define MASK_SRO 0xfe00707f +#define MATCH_ROL 0x60001033 +#define MASK_ROL 0xfe00707f +#define MATCH_ROR 0x60005033 +#define MASK_ROR 0xfe00707f +#define MATCH_BCLR 0x48001033 +#define MASK_BCLR 0xfe00707f +#define MATCH_BSET 0x28001033 +#define MASK_BSET 0xfe00707f +#define MATCH_BINV 0x68001033 +#define MASK_BINV 0xfe00707f +#define MATCH_BEXT 0x48005033 +#define MASK_BEXT 0xfe00707f +#define MATCH_GORC 0x28005033 +#define MASK_GORC 0xfe00707f +#define MATCH_GREV 0x68005033 +#define MASK_GREV 0xfe00707f +#define MATCH_SLOI 0x20001013 +#define MASK_SLOI 0xfc00707f +#define MATCH_SROI 0x20005013 +#define MASK_SROI 0xfc00707f +#define MATCH_RORI 0x60005013 +#define MASK_RORI 0xfc00707f +#define MATCH_BCLRI 0x48001013 +#define MASK_BCLRI 0xfc00707f +#define MATCH_BSETI 0x28001013 +#define MASK_BSETI 0xfc00707f +#define MATCH_BINVI 0x68001013 +#define MASK_BINVI 0xfc00707f +#define MATCH_BEXTI 0x48005013 +#define MASK_BEXTI 0xfc00707f +#define MATCH_GORCI 0x28005013 +#define MASK_GORCI 0xfc00707f +#define MATCH_GREVI 0x68005013 +#define MASK_GREVI 0xfc00707f +#define MATCH_CMIX 0x6001033 +#define MASK_CMIX 0x600707f +#define MATCH_CMOV 0x6005033 +#define MASK_CMOV 0x600707f +#define MATCH_FSL 0x4001033 +#define MASK_FSL 0x600707f +#define MATCH_FSR 0x4005033 +#define MASK_FSR 0x600707f +#define MATCH_FSRI 0x4005013 +#define MASK_FSRI 0x400707f +#define MATCH_CLZ 0x60001013 +#define MASK_CLZ 0xfff0707f +#define MATCH_CTZ 0x60101013 +#define MASK_CTZ 0xfff0707f +#define MATCH_CPOP 0x60201013 +#define MASK_CPOP 0xfff0707f +#define MATCH_SEXT_B 0x60401013 +#define MASK_SEXT_B 0xfff0707f +#define MATCH_SEXT_H 0x60501013 +#define MASK_SEXT_H 0xfff0707f +#define MATCH_CRC32_B 0x61001013 +#define MASK_CRC32_B 0xfff0707f +#define MATCH_CRC32_H 0x61101013 +#define MASK_CRC32_H 0xfff0707f +#define MATCH_CRC32_W 0x61201013 +#define MASK_CRC32_W 0xfff0707f +#define MATCH_CRC32C_B 0x61801013 +#define MASK_CRC32C_B 0xfff0707f +#define MATCH_CRC32C_H 0x61901013 +#define MASK_CRC32C_H 0xfff0707f +#define MATCH_CRC32C_W 0x61a01013 +#define MASK_CRC32C_W 0xfff0707f +#define MATCH_SH1ADD 0x20002033 +#define MASK_SH1ADD 0xfe00707f +#define MATCH_SH2ADD 0x20004033 +#define MASK_SH2ADD 0xfe00707f +#define MATCH_SH3ADD 0x20006033 +#define MASK_SH3ADD 0xfe00707f +#define 
MATCH_CLMUL 0xa001033 +#define MASK_CLMUL 0xfe00707f +#define MATCH_CLMULR 0xa002033 +#define MASK_CLMULR 0xfe00707f +#define MATCH_CLMULH 0xa003033 +#define MASK_CLMULH 0xfe00707f +#define MATCH_MIN 0xa004033 +#define MASK_MIN 0xfe00707f +#define MATCH_MINU 0xa005033 +#define MASK_MINU 0xfe00707f +#define MATCH_MAX 0xa006033 +#define MASK_MAX 0xfe00707f +#define MATCH_MAXU 0xa007033 +#define MASK_MAXU 0xfe00707f +#define MATCH_SHFL 0x8001033 +#define MASK_SHFL 0xfe00707f +#define MATCH_UNSHFL 0x8005033 +#define MASK_UNSHFL 0xfe00707f +#define MATCH_BCOMPRESS 0x8006033 +#define MASK_BCOMPRESS 0xfe00707f +#define MATCH_BDECOMPRESS 0x48006033 +#define MASK_BDECOMPRESS 0xfe00707f +#define MATCH_PACK 0x8004033 +#define MASK_PACK 0xfe00707f +#define MATCH_PACKU 0x48004033 +#define MASK_PACKU 0xfe00707f +#define MATCH_PACKH 0x8007033 +#define MASK_PACKH 0xfe00707f +#define MATCH_BFP 0x48007033 +#define MASK_BFP 0xfe00707f +#define MATCH_SHFLI 0x8001013 +#define MASK_SHFLI 0xfe00707f +#define MATCH_UNSHFLI 0x8005013 +#define MASK_UNSHFLI 0xfe00707f +#define MATCH_XPERM4 0x28002033 +#define MASK_XPERM4 0xfe00707f +#define MATCH_XPERM8 0x28004033 +#define MASK_XPERM8 0xfe00707f +#define MATCH_XPERM16 0x28006033 +#define MASK_XPERM16 0xfe00707f +#define MATCH_BMATFLIP 0x60301013 +#define MASK_BMATFLIP 0xfff0707f +#define MATCH_CRC32_D 0x61301013 +#define MASK_CRC32_D 0xfff0707f +#define MATCH_CRC32C_D 0x61b01013 +#define MASK_CRC32C_D 0xfff0707f +#define MATCH_BMATOR 0x8003033 +#define MASK_BMATOR 0xfe00707f +#define MATCH_BMATXOR 0x48003033 +#define MASK_BMATXOR 0xfe00707f +#define MATCH_SLLI_UW 0x800101b +#define MASK_SLLI_UW 0xfc00707f +#define MATCH_ADD_UW 0x800003b +#define MASK_ADD_UW 0xfe00707f +#define MATCH_SLOW 0x2000103b +#define MASK_SLOW 0xfe00707f +#define MATCH_SROW 0x2000503b +#define MASK_SROW 0xfe00707f +#define MATCH_ROLW 0x6000103b +#define MASK_ROLW 0xfe00707f +#define MATCH_RORW 0x6000503b +#define MASK_RORW 0xfe00707f +#define MATCH_GORCW 0x2800503b +#define MASK_GORCW 0xfe00707f +#define MATCH_GREVW 0x6800503b +#define MASK_GREVW 0xfe00707f +#define MATCH_SLOIW 0x2000101b +#define MASK_SLOIW 0xfe00707f +#define MATCH_SROIW 0x2000501b +#define MASK_SROIW 0xfe00707f +#define MATCH_RORIW 0x6000501b +#define MASK_RORIW 0xfe00707f +#define MATCH_GORCIW 0x2800501b +#define MASK_GORCIW 0xfe00707f +#define MATCH_GREVIW 0x6800501b +#define MASK_GREVIW 0xfe00707f +#define MATCH_FSLW 0x400103b +#define MASK_FSLW 0x600707f +#define MATCH_FSRW 0x400503b +#define MASK_FSRW 0x600707f +#define MATCH_FSRIW 0x400501b +#define MASK_FSRIW 0x600707f +#define MATCH_CLZW 0x6000101b +#define MASK_CLZW 0xfff0707f +#define MATCH_CTZW 0x6010101b +#define MASK_CTZW 0xfff0707f +#define MATCH_CPOPW 0x6020101b +#define MASK_CPOPW 0xfff0707f +#define MATCH_SH1ADD_UW 0x2000203b +#define MASK_SH1ADD_UW 0xfe00707f +#define MATCH_SH2ADD_UW 0x2000403b +#define MASK_SH2ADD_UW 0xfe00707f +#define MATCH_SH3ADD_UW 0x2000603b +#define MASK_SH3ADD_UW 0xfe00707f +#define MATCH_SHFLW 0x800103b +#define MASK_SHFLW 0xfe00707f +#define MATCH_UNSHFLW 0x800503b +#define MASK_UNSHFLW 0xfe00707f +#define MATCH_BCOMPRESSW 0x800603b +#define MASK_BCOMPRESSW 0xfe00707f +#define MATCH_BDECOMPRESSW 0x4800603b +#define MASK_BDECOMPRESSW 0xfe00707f +#define MATCH_PACKW 0x800403b +#define MASK_PACKW 0xfe00707f +#define MATCH_PACKUW 0x4800403b +#define MASK_PACKUW 0xfe00707f +#define MATCH_BFPW 0x4800703b +#define MASK_BFPW 0xfe00707f +#define MATCH_XPERM32 0x28000033 +#define MASK_XPERM32 0xfe00707f +#define MATCH_ECALL 0x73 +#define 
MASK_ECALL 0xffffffff +#define MATCH_EBREAK 0x100073 +#define MASK_EBREAK 0xffffffff +#define MATCH_SRET 0x10200073 +#define MASK_SRET 0xffffffff +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff +#define MATCH_DRET 0x7b200073 +#define MASK_DRET 0xffffffff +#define MATCH_SFENCE_VMA 0x12000073 +#define MASK_SFENCE_VMA 0xfe007fff +#define MATCH_WFI 0x10500073 +#define MASK_WFI 0xffffffff +#define MATCH_CSRRW 0x1073 +#define MASK_CSRRW 0x707f +#define MATCH_CSRRS 0x2073 +#define MASK_CSRRS 0x707f +#define MATCH_CSRRC 0x3073 +#define MASK_CSRRC 0x707f +#define MATCH_CSRRWI 0x5073 +#define MASK_CSRRWI 0x707f +#define MATCH_CSRRSI 0x6073 +#define MASK_CSRRSI 0x707f +#define MATCH_CSRRCI 0x7073 +#define MASK_CSRRCI 0x707f +#define MATCH_SINVAL_VMA 0x16000073 +#define MASK_SINVAL_VMA 0xfe007fff +#define MATCH_SFENCE_W_INVAL 0x18000073 +#define MASK_SFENCE_W_INVAL 0xffffffff +#define MATCH_SFENCE_INVAL_IR 0x18100073 +#define MASK_SFENCE_INVAL_IR 0xffffffff +#define MATCH_HINVAL_VVMA 0x26000073 +#define MASK_HINVAL_VVMA 0xfe007fff +#define MATCH_HINVAL_GVMA 0x66000073 +#define MASK_HINVAL_GVMA 0xfe007fff +#define MATCH_FADD_H 0x4000053 +#define MASK_FADD_H 0xfe00007f +#define MATCH_FSUB_H 0xc000053 +#define MASK_FSUB_H 0xfe00007f +#define MATCH_FMUL_H 0x14000053 +#define MASK_FMUL_H 0xfe00007f +#define MATCH_FDIV_H 0x1c000053 +#define MASK_FDIV_H 0xfe00007f +#define MATCH_FSGNJ_H 0x24000053 +#define MASK_FSGNJ_H 0xfe00707f +#define MATCH_FSGNJN_H 0x24001053 +#define MASK_FSGNJN_H 0xfe00707f +#define MATCH_FSGNJX_H 0x24002053 +#define MASK_FSGNJX_H 0xfe00707f +#define MATCH_FMIN_H 0x2c000053 +#define MASK_FMIN_H 0xfe00707f +#define MATCH_FMAX_H 0x2c001053 +#define MASK_FMAX_H 0xfe00707f +#define MATCH_FCVT_H_S 0x44000053 +#define MASK_FCVT_H_S 0xfff0007f +#define MATCH_FCVT_S_H 0x40200053 +#define MASK_FCVT_S_H 0xfff0007f +#define MATCH_FSQRT_H 0x5c000053 +#define MASK_FSQRT_H 0xfff0007f +#define MATCH_FLE_H 0xa4000053 +#define MASK_FLE_H 0xfe00707f +#define MATCH_FLT_H 0xa4001053 +#define MASK_FLT_H 0xfe00707f +#define MATCH_FEQ_H 0xa4002053 +#define MASK_FEQ_H 0xfe00707f +#define MATCH_FCVT_W_H 0xc4000053 +#define MASK_FCVT_W_H 0xfff0007f +#define MATCH_FCVT_WU_H 0xc4100053 +#define MASK_FCVT_WU_H 0xfff0007f +#define MATCH_FMV_X_H 0xe4000053 +#define MASK_FMV_X_H 0xfff0707f +#define MATCH_FCLASS_H 0xe4001053 +#define MASK_FCLASS_H 0xfff0707f +#define MATCH_FCVT_H_W 0xd4000053 +#define MASK_FCVT_H_W 0xfff0007f +#define MATCH_FCVT_H_WU 0xd4100053 +#define MASK_FCVT_H_WU 0xfff0007f +#define MATCH_FMV_H_X 0xf4000053 +#define MASK_FMV_H_X 0xfff0707f +#define MATCH_FLH 0x1007 +#define MASK_FLH 0x707f +#define MATCH_FSH 0x1027 +#define MASK_FSH 0x707f +#define MATCH_FMADD_H 0x4000043 +#define MASK_FMADD_H 0x600007f +#define MATCH_FMSUB_H 0x4000047 +#define MASK_FMSUB_H 0x600007f +#define MATCH_FNMSUB_H 0x400004b +#define MASK_FNMSUB_H 0x600007f +#define MATCH_FNMADD_H 0x400004f +#define MASK_FNMADD_H 0x600007f +#define MATCH_FCVT_H_D 0x44100053 +#define MASK_FCVT_H_D 0xfff0007f +#define MATCH_FCVT_D_H 0x42200053 +#define MASK_FCVT_D_H 0xfff0007f +#define MATCH_FCVT_H_Q 0x44300053 +#define MASK_FCVT_H_Q 0xfff0007f +#define MATCH_FCVT_Q_H 0x46200053 +#define MASK_FCVT_Q_H 0xfff0007f +#define MATCH_FCVT_L_H 0xc4200053 +#define MASK_FCVT_L_H 0xfff0007f +#define MATCH_FCVT_LU_H 0xc4300053 +#define MASK_FCVT_LU_H 0xfff0007f +#define MATCH_FCVT_H_L 0xd4200053 +#define MASK_FCVT_H_L 0xfff0007f +#define MATCH_FCVT_H_LU 0xd4300053 +#define MASK_FCVT_H_LU 0xfff0007f +#define MATCH_SM4ED 0x30000033 
+#define MASK_SM4ED 0x3e00707f +#define MATCH_SM4KS 0x34000033 +#define MASK_SM4KS 0x3e00707f +#define MATCH_SM3P0 0x10801013 +#define MASK_SM3P0 0xfff0707f +#define MATCH_SM3P1 0x10901013 +#define MASK_SM3P1 0xfff0707f +#define MATCH_SHA256SUM0 0x10001013 +#define MASK_SHA256SUM0 0xfff0707f +#define MATCH_SHA256SUM1 0x10101013 +#define MASK_SHA256SUM1 0xfff0707f +#define MATCH_SHA256SIG0 0x10201013 +#define MASK_SHA256SIG0 0xfff0707f +#define MATCH_SHA256SIG1 0x10301013 +#define MASK_SHA256SIG1 0xfff0707f +#define MATCH_AES32ESMI 0x26000033 +#define MASK_AES32ESMI 0x3e00707f +#define MATCH_AES32ESI 0x22000033 +#define MASK_AES32ESI 0x3e00707f +#define MATCH_AES32DSMI 0x2e000033 +#define MASK_AES32DSMI 0x3e00707f +#define MATCH_AES32DSI 0x2a000033 +#define MASK_AES32DSI 0x3e00707f +#define MATCH_SHA512SUM0R 0x50000033 +#define MASK_SHA512SUM0R 0xfe00707f +#define MATCH_SHA512SUM1R 0x52000033 +#define MASK_SHA512SUM1R 0xfe00707f +#define MATCH_SHA512SIG0L 0x54000033 +#define MASK_SHA512SIG0L 0xfe00707f +#define MATCH_SHA512SIG0H 0x5c000033 +#define MASK_SHA512SIG0H 0xfe00707f +#define MATCH_SHA512SIG1L 0x56000033 +#define MASK_SHA512SIG1L 0xfe00707f +#define MATCH_SHA512SIG1H 0x5e000033 +#define MASK_SHA512SIG1H 0xfe00707f +#define MATCH_AES64KS1I 0x31001013 +#define MASK_AES64KS1I 0xff00707f +#define MATCH_AES64IM 0x30001013 +#define MASK_AES64IM 0xfff0707f +#define MATCH_AES64KS2 0x7e000033 +#define MASK_AES64KS2 0xfe00707f +#define MATCH_AES64ESM 0x36000033 +#define MASK_AES64ESM 0xfe00707f +#define MATCH_AES64ES 0x32000033 +#define MASK_AES64ES 0xfe00707f +#define MATCH_AES64DSM 0x3e000033 +#define MASK_AES64DSM 0xfe00707f +#define MATCH_AES64DS 0x3a000033 +#define MASK_AES64DS 0xfe00707f +#define MATCH_SHA512SUM0 0x10401013 +#define MASK_SHA512SUM0 0xfff0707f +#define MATCH_SHA512SUM1 0x10501013 +#define MASK_SHA512SUM1 0xfff0707f +#define MATCH_SHA512SIG0 0x10601013 +#define MASK_SHA512SIG0 0xfff0707f +#define MATCH_SHA512SIG1 0x10701013 +#define MASK_SHA512SIG1 0xfff0707f +#define MATCH_CBO_CLEAN 0x10200f +#define MASK_CBO_CLEAN 0xfff07fff +#define MATCH_CBO_FLUSH 0x20200f +#define MASK_CBO_FLUSH 0xfff07fff +#define MATCH_CBO_INVAL 0x200f +#define MASK_CBO_INVAL 0xfff07fff +#define MATCH_CBO_ZERO 0x40200f +#define MASK_CBO_ZERO 0xfff07fff +#define MATCH_PREFETCH_I 0x6013 +#define MASK_PREFETCH_I 0x1f07fff +#define MATCH_PREFETCH_R 0x106013 +#define MASK_PREFETCH_R 0x1f07fff +#define MATCH_PREFETCH_W 0x306013 +#define MASK_PREFETCH_W 0x1f07fff +#define MATCH_C_NOP 0x1 +#define MASK_C_NOP 0xffff +#define MATCH_C_ADDI16SP 0x6101 +#define MASK_C_ADDI16SP 0xef83 +#define MATCH_C_JR 0x8002 +#define MASK_C_JR 0xf07f +#define MATCH_C_JALR 0x9002 +#define MASK_C_JALR 0xf07f +#define MATCH_C_EBREAK 0x9002 +#define MASK_C_EBREAK 0xffff +#define MATCH_C_ADDI4SPN 0x0 +#define MASK_C_ADDI4SPN 0xe003 +#define MATCH_C_FLD 0x2000 +#define MASK_C_FLD 0xe003 +#define MATCH_C_LW 0x4000 +#define MASK_C_LW 0xe003 +#define MATCH_C_FLW 0x6000 +#define MASK_C_FLW 0xe003 +#define MATCH_C_FSD 0xa000 +#define MASK_C_FSD 0xe003 +#define MATCH_C_SW 0xc000 +#define MASK_C_SW 0xe003 +#define MATCH_C_FSW 0xe000 +#define MASK_C_FSW 0xe003 +#define MATCH_C_ADDI 0x1 +#define MASK_C_ADDI 0xe003 +#define MATCH_C_JAL 0x2001 +#define MASK_C_JAL 0xe003 +#define MATCH_C_LI 0x4001 +#define MASK_C_LI 0xe003 +#define MATCH_C_LUI 0x6001 +#define MASK_C_LUI 0xe003 +#define MATCH_C_SRLI 0x8001 +#define MASK_C_SRLI 0xec03 +#define MATCH_C_SRAI 0x8401 +#define MASK_C_SRAI 0xec03 +#define MATCH_C_ANDI 0x8801 +#define MASK_C_ANDI 
0xec03 +#define MATCH_C_SUB 0x8c01 +#define MASK_C_SUB 0xfc63 +#define MATCH_C_XOR 0x8c21 +#define MASK_C_XOR 0xfc63 +#define MATCH_C_OR 0x8c41 +#define MASK_C_OR 0xfc63 +#define MATCH_C_AND 0x8c61 +#define MASK_C_AND 0xfc63 +#define MATCH_C_J 0xa001 +#define MASK_C_J 0xe003 +#define MATCH_C_BEQZ 0xc001 +#define MASK_C_BEQZ 0xe003 +#define MATCH_C_BNEZ 0xe001 +#define MASK_C_BNEZ 0xe003 +#define MATCH_C_SLLI 0x2 +#define MASK_C_SLLI 0xe003 +#define MATCH_C_FLDSP 0x2002 +#define MASK_C_FLDSP 0xe003 +#define MATCH_C_LWSP 0x4002 +#define MASK_C_LWSP 0xe003 +#define MATCH_C_FLWSP 0x6002 +#define MASK_C_FLWSP 0xe003 +#define MATCH_C_MV 0x8002 +#define MASK_C_MV 0xf003 +#define MATCH_C_ADD 0x9002 +#define MASK_C_ADD 0xf003 +#define MATCH_C_FSDSP 0xa002 +#define MASK_C_FSDSP 0xe003 +#define MATCH_C_SWSP 0xc002 +#define MASK_C_SWSP 0xe003 +#define MATCH_C_FSWSP 0xe002 +#define MASK_C_FSWSP 0xe003 +#define MATCH_C_SRLI_RV32 0x8001 +#define MASK_C_SRLI_RV32 0xfc03 +#define MATCH_C_SRAI_RV32 0x8401 +#define MASK_C_SRAI_RV32 0xfc03 +#define MATCH_C_SLLI_RV32 0x2 +#define MASK_C_SLLI_RV32 0xf003 +#define MATCH_C_LD 0x6000 +#define MASK_C_LD 0xe003 +#define MATCH_C_SD 0xe000 +#define MASK_C_SD 0xe003 +#define MATCH_C_SUBW 0x9c01 +#define MASK_C_SUBW 0xfc63 +#define MATCH_C_ADDW 0x9c21 +#define MASK_C_ADDW 0xfc63 +#define MATCH_C_ADDIW 0x2001 +#define MASK_C_ADDIW 0xe003 +#define MATCH_C_LDSP 0x6002 +#define MASK_C_LDSP 0xe003 +#define MATCH_C_SDSP 0xe002 +#define MASK_C_SDSP 0xe003 +#define MATCH_CUSTOM0 0xb +#define MASK_CUSTOM0 0x707f +#define MATCH_CUSTOM0_RS1 0x200b +#define MASK_CUSTOM0_RS1 0x707f +#define MATCH_CUSTOM0_RS1_RS2 0x300b +#define MASK_CUSTOM0_RS1_RS2 0x707f +#define MATCH_CUSTOM0_RD 0x400b +#define MASK_CUSTOM0_RD 0x707f +#define MATCH_CUSTOM0_RD_RS1 0x600b +#define MASK_CUSTOM0_RD_RS1 0x707f +#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b +#define MASK_CUSTOM0_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM1 0x2b +#define MASK_CUSTOM1 0x707f +#define MATCH_CUSTOM1_RS1 0x202b +#define MASK_CUSTOM1_RS1 0x707f +#define MATCH_CUSTOM1_RS1_RS2 0x302b +#define MASK_CUSTOM1_RS1_RS2 0x707f +#define MATCH_CUSTOM1_RD 0x402b +#define MASK_CUSTOM1_RD 0x707f +#define MATCH_CUSTOM1_RD_RS1 0x602b +#define MASK_CUSTOM1_RD_RS1 0x707f +#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b +#define MASK_CUSTOM1_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM2 0x5b +#define MASK_CUSTOM2 0x707f +#define MATCH_CUSTOM2_RS1 0x205b +#define MASK_CUSTOM2_RS1 0x707f +#define MATCH_CUSTOM2_RS1_RS2 0x305b +#define MASK_CUSTOM2_RS1_RS2 0x707f +#define MATCH_CUSTOM2_RD 0x405b +#define MASK_CUSTOM2_RD 0x707f +#define MATCH_CUSTOM2_RD_RS1 0x605b +#define MASK_CUSTOM2_RD_RS1 0x707f +#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b +#define MASK_CUSTOM2_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM3 0x7b +#define MASK_CUSTOM3 0x707f +#define MATCH_CUSTOM3_RS1 0x207b +#define MASK_CUSTOM3_RS1 0x707f +#define MATCH_CUSTOM3_RS1_RS2 0x307b +#define MASK_CUSTOM3_RS1_RS2 0x707f +#define MATCH_CUSTOM3_RD 0x407b +#define MASK_CUSTOM3_RD 0x707f +#define MATCH_CUSTOM3_RD_RS1 0x607b +#define MASK_CUSTOM3_RD_RS1 0x707f +#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b +#define MASK_CUSTOM3_RD_RS1_RS2 0x707f +#define MATCH_VSETIVLI 0xc0007057 +#define MASK_VSETIVLI 0xc000707f +#define MATCH_VSETVLI 0x7057 +#define MASK_VSETVLI 0x8000707f +#define MATCH_VSETVL 0x80007057 +#define MASK_VSETVL 0xfe00707f +#define MATCH_VLM_V 0x2b00007 +#define MASK_VLM_V 0xfff0707f +#define MATCH_VSM_V 0x2b00027 +#define MASK_VSM_V 0xfff0707f +#define MATCH_VLE8_V 0x7 +#define MASK_VLE8_V 0x1df0707f +#define 
MATCH_VLE16_V 0x5007 +#define MASK_VLE16_V 0x1df0707f +#define MATCH_VLE32_V 0x6007 +#define MASK_VLE32_V 0x1df0707f +#define MATCH_VLE64_V 0x7007 +#define MASK_VLE64_V 0x1df0707f +#define MATCH_VLE128_V 0x10000007 +#define MASK_VLE128_V 0x1df0707f +#define MATCH_VLE256_V 0x10005007 +#define MASK_VLE256_V 0x1df0707f +#define MATCH_VLE512_V 0x10006007 +#define MASK_VLE512_V 0x1df0707f +#define MATCH_VLE1024_V 0x10007007 +#define MASK_VLE1024_V 0x1df0707f +#define MATCH_VSE8_V 0x27 +#define MASK_VSE8_V 0x1df0707f +#define MATCH_VSE16_V 0x5027 +#define MASK_VSE16_V 0x1df0707f +#define MATCH_VSE32_V 0x6027 +#define MASK_VSE32_V 0x1df0707f +#define MATCH_VSE64_V 0x7027 +#define MASK_VSE64_V 0x1df0707f +#define MATCH_VSE128_V 0x10000027 +#define MASK_VSE128_V 0x1df0707f +#define MATCH_VSE256_V 0x10005027 +#define MASK_VSE256_V 0x1df0707f +#define MATCH_VSE512_V 0x10006027 +#define MASK_VSE512_V 0x1df0707f +#define MATCH_VSE1024_V 0x10007027 +#define MASK_VSE1024_V 0x1df0707f +#define MATCH_VLUXEI8_V 0x4000007 +#define MASK_VLUXEI8_V 0x1c00707f +#define MATCH_VLUXEI16_V 0x4005007 +#define MASK_VLUXEI16_V 0x1c00707f +#define MATCH_VLUXEI32_V 0x4006007 +#define MASK_VLUXEI32_V 0x1c00707f +#define MATCH_VLUXEI64_V 0x4007007 +#define MASK_VLUXEI64_V 0x1c00707f +#define MATCH_VLUXEI128_V 0x14000007 +#define MASK_VLUXEI128_V 0x1c00707f +#define MATCH_VLUXEI256_V 0x14005007 +#define MASK_VLUXEI256_V 0x1c00707f +#define MATCH_VLUXEI512_V 0x14006007 +#define MASK_VLUXEI512_V 0x1c00707f +#define MATCH_VLUXEI1024_V 0x14007007 +#define MASK_VLUXEI1024_V 0x1c00707f +#define MATCH_VSUXEI8_V 0x4000027 +#define MASK_VSUXEI8_V 0x1c00707f +#define MATCH_VSUXEI16_V 0x4005027 +#define MASK_VSUXEI16_V 0x1c00707f +#define MATCH_VSUXEI32_V 0x4006027 +#define MASK_VSUXEI32_V 0x1c00707f +#define MATCH_VSUXEI64_V 0x4007027 +#define MASK_VSUXEI64_V 0x1c00707f +#define MATCH_VSUXEI128_V 0x14000027 +#define MASK_VSUXEI128_V 0x1c00707f +#define MATCH_VSUXEI256_V 0x14005027 +#define MASK_VSUXEI256_V 0x1c00707f +#define MATCH_VSUXEI512_V 0x14006027 +#define MASK_VSUXEI512_V 0x1c00707f +#define MATCH_VSUXEI1024_V 0x14007027 +#define MASK_VSUXEI1024_V 0x1c00707f +#define MATCH_VLSE8_V 0x8000007 +#define MASK_VLSE8_V 0x1c00707f +#define MATCH_VLSE16_V 0x8005007 +#define MASK_VLSE16_V 0x1c00707f +#define MATCH_VLSE32_V 0x8006007 +#define MASK_VLSE32_V 0x1c00707f +#define MATCH_VLSE64_V 0x8007007 +#define MASK_VLSE64_V 0x1c00707f +#define MATCH_VLSE128_V 0x18000007 +#define MASK_VLSE128_V 0x1c00707f +#define MATCH_VLSE256_V 0x18005007 +#define MASK_VLSE256_V 0x1c00707f +#define MATCH_VLSE512_V 0x18006007 +#define MASK_VLSE512_V 0x1c00707f +#define MATCH_VLSE1024_V 0x18007007 +#define MASK_VLSE1024_V 0x1c00707f +#define MATCH_VSSE8_V 0x8000027 +#define MASK_VSSE8_V 0x1c00707f +#define MATCH_VSSE16_V 0x8005027 +#define MASK_VSSE16_V 0x1c00707f +#define MATCH_VSSE32_V 0x8006027 +#define MASK_VSSE32_V 0x1c00707f +#define MATCH_VSSE64_V 0x8007027 +#define MASK_VSSE64_V 0x1c00707f +#define MATCH_VSSE128_V 0x18000027 +#define MASK_VSSE128_V 0x1c00707f +#define MATCH_VSSE256_V 0x18005027 +#define MASK_VSSE256_V 0x1c00707f +#define MATCH_VSSE512_V 0x18006027 +#define MASK_VSSE512_V 0x1c00707f +#define MATCH_VSSE1024_V 0x18007027 +#define MASK_VSSE1024_V 0x1c00707f +#define MATCH_VLOXEI8_V 0xc000007 +#define MASK_VLOXEI8_V 0x1c00707f +#define MATCH_VLOXEI16_V 0xc005007 +#define MASK_VLOXEI16_V 0x1c00707f +#define MATCH_VLOXEI32_V 0xc006007 +#define MASK_VLOXEI32_V 0x1c00707f +#define MATCH_VLOXEI64_V 0xc007007 +#define MASK_VLOXEI64_V 
0x1c00707f +#define MATCH_VLOXEI128_V 0x1c000007 +#define MASK_VLOXEI128_V 0x1c00707f +#define MATCH_VLOXEI256_V 0x1c005007 +#define MASK_VLOXEI256_V 0x1c00707f +#define MATCH_VLOXEI512_V 0x1c006007 +#define MASK_VLOXEI512_V 0x1c00707f +#define MATCH_VLOXEI1024_V 0x1c007007 +#define MASK_VLOXEI1024_V 0x1c00707f +#define MATCH_VSOXEI8_V 0xc000027 +#define MASK_VSOXEI8_V 0x1c00707f +#define MATCH_VSOXEI16_V 0xc005027 +#define MASK_VSOXEI16_V 0x1c00707f +#define MATCH_VSOXEI32_V 0xc006027 +#define MASK_VSOXEI32_V 0x1c00707f +#define MATCH_VSOXEI64_V 0xc007027 +#define MASK_VSOXEI64_V 0x1c00707f +#define MATCH_VSOXEI128_V 0x1c000027 +#define MASK_VSOXEI128_V 0x1c00707f +#define MATCH_VSOXEI256_V 0x1c005027 +#define MASK_VSOXEI256_V 0x1c00707f +#define MATCH_VSOXEI512_V 0x1c006027 +#define MASK_VSOXEI512_V 0x1c00707f +#define MATCH_VSOXEI1024_V 0x1c007027 +#define MASK_VSOXEI1024_V 0x1c00707f +#define MATCH_VLE8FF_V 0x1000007 +#define MASK_VLE8FF_V 0x1df0707f +#define MATCH_VLE16FF_V 0x1005007 +#define MASK_VLE16FF_V 0x1df0707f +#define MATCH_VLE32FF_V 0x1006007 +#define MASK_VLE32FF_V 0x1df0707f +#define MATCH_VLE64FF_V 0x1007007 +#define MASK_VLE64FF_V 0x1df0707f +#define MATCH_VLE128FF_V 0x11000007 +#define MASK_VLE128FF_V 0x1df0707f +#define MATCH_VLE256FF_V 0x11005007 +#define MASK_VLE256FF_V 0x1df0707f +#define MATCH_VLE512FF_V 0x11006007 +#define MASK_VLE512FF_V 0x1df0707f +#define MATCH_VLE1024FF_V 0x11007007 +#define MASK_VLE1024FF_V 0x1df0707f +#define MATCH_VL1RE8_V 0x2800007 +#define MASK_VL1RE8_V 0xfff0707f +#define MATCH_VL1RE16_V 0x2805007 +#define MASK_VL1RE16_V 0xfff0707f +#define MATCH_VL1RE32_V 0x2806007 +#define MASK_VL1RE32_V 0xfff0707f +#define MATCH_VL1RE64_V 0x2807007 +#define MASK_VL1RE64_V 0xfff0707f +#define MATCH_VL2RE8_V 0x22800007 +#define MASK_VL2RE8_V 0xfff0707f +#define MATCH_VL2RE16_V 0x22805007 +#define MASK_VL2RE16_V 0xfff0707f +#define MATCH_VL2RE32_V 0x22806007 +#define MASK_VL2RE32_V 0xfff0707f +#define MATCH_VL2RE64_V 0x22807007 +#define MASK_VL2RE64_V 0xfff0707f +#define MATCH_VL4RE8_V 0x62800007 +#define MASK_VL4RE8_V 0xfff0707f +#define MATCH_VL4RE16_V 0x62805007 +#define MASK_VL4RE16_V 0xfff0707f +#define MATCH_VL4RE32_V 0x62806007 +#define MASK_VL4RE32_V 0xfff0707f +#define MATCH_VL4RE64_V 0x62807007 +#define MASK_VL4RE64_V 0xfff0707f +#define MATCH_VL8RE8_V 0xe2800007 +#define MASK_VL8RE8_V 0xfff0707f +#define MATCH_VL8RE16_V 0xe2805007 +#define MASK_VL8RE16_V 0xfff0707f +#define MATCH_VL8RE32_V 0xe2806007 +#define MASK_VL8RE32_V 0xfff0707f +#define MATCH_VL8RE64_V 0xe2807007 +#define MASK_VL8RE64_V 0xfff0707f +#define MATCH_VS1R_V 0x2800027 +#define MASK_VS1R_V 0xfff0707f +#define MATCH_VS2R_V 0x22800027 +#define MASK_VS2R_V 0xfff0707f +#define MATCH_VS4R_V 0x62800027 +#define MASK_VS4R_V 0xfff0707f +#define MATCH_VS8R_V 0xe2800027 +#define MASK_VS8R_V 0xfff0707f +#define MATCH_VFADD_VF 0x5057 +#define MASK_VFADD_VF 0xfc00707f +#define MATCH_VFSUB_VF 0x8005057 +#define MASK_VFSUB_VF 0xfc00707f +#define MATCH_VFMIN_VF 0x10005057 +#define MASK_VFMIN_VF 0xfc00707f +#define MATCH_VFMAX_VF 0x18005057 +#define MASK_VFMAX_VF 0xfc00707f +#define MATCH_VFSGNJ_VF 0x20005057 +#define MASK_VFSGNJ_VF 0xfc00707f +#define MATCH_VFSGNJN_VF 0x24005057 +#define MASK_VFSGNJN_VF 0xfc00707f +#define MATCH_VFSGNJX_VF 0x28005057 +#define MASK_VFSGNJX_VF 0xfc00707f +#define MATCH_VFSLIDE1UP_VF 0x38005057 +#define MASK_VFSLIDE1UP_VF 0xfc00707f +#define MATCH_VFSLIDE1DOWN_VF 0x3c005057 +#define MASK_VFSLIDE1DOWN_VF 0xfc00707f +#define MATCH_VFMV_S_F 0x42005057 +#define 
MASK_VFMV_S_F 0xfff0707f +#define MATCH_VFMERGE_VFM 0x5c005057 +#define MASK_VFMERGE_VFM 0xfe00707f +#define MATCH_VFMV_V_F 0x5e005057 +#define MASK_VFMV_V_F 0xfff0707f +#define MATCH_VMFEQ_VF 0x60005057 +#define MASK_VMFEQ_VF 0xfc00707f +#define MATCH_VMFLE_VF 0x64005057 +#define MASK_VMFLE_VF 0xfc00707f +#define MATCH_VMFLT_VF 0x6c005057 +#define MASK_VMFLT_VF 0xfc00707f +#define MATCH_VMFNE_VF 0x70005057 +#define MASK_VMFNE_VF 0xfc00707f +#define MATCH_VMFGT_VF 0x74005057 +#define MASK_VMFGT_VF 0xfc00707f +#define MATCH_VMFGE_VF 0x7c005057 +#define MASK_VMFGE_VF 0xfc00707f +#define MATCH_VFDIV_VF 0x80005057 +#define MASK_VFDIV_VF 0xfc00707f +#define MATCH_VFRDIV_VF 0x84005057 +#define MASK_VFRDIV_VF 0xfc00707f +#define MATCH_VFMUL_VF 0x90005057 +#define MASK_VFMUL_VF 0xfc00707f +#define MATCH_VFRSUB_VF 0x9c005057 +#define MASK_VFRSUB_VF 0xfc00707f +#define MATCH_VFMADD_VF 0xa0005057 +#define MASK_VFMADD_VF 0xfc00707f +#define MATCH_VFNMADD_VF 0xa4005057 +#define MASK_VFNMADD_VF 0xfc00707f +#define MATCH_VFMSUB_VF 0xa8005057 +#define MASK_VFMSUB_VF 0xfc00707f +#define MATCH_VFNMSUB_VF 0xac005057 +#define MASK_VFNMSUB_VF 0xfc00707f +#define MATCH_VFMACC_VF 0xb0005057 +#define MASK_VFMACC_VF 0xfc00707f +#define MATCH_VFNMACC_VF 0xb4005057 +#define MASK_VFNMACC_VF 0xfc00707f +#define MATCH_VFMSAC_VF 0xb8005057 +#define MASK_VFMSAC_VF 0xfc00707f +#define MATCH_VFNMSAC_VF 0xbc005057 +#define MASK_VFNMSAC_VF 0xfc00707f +#define MATCH_VFWADD_VF 0xc0005057 +#define MASK_VFWADD_VF 0xfc00707f +#define MATCH_VFWSUB_VF 0xc8005057 +#define MASK_VFWSUB_VF 0xfc00707f +#define MATCH_VFWADD_WF 0xd0005057 +#define MASK_VFWADD_WF 0xfc00707f +#define MATCH_VFWSUB_WF 0xd8005057 +#define MASK_VFWSUB_WF 0xfc00707f +#define MATCH_VFWMUL_VF 0xe0005057 +#define MASK_VFWMUL_VF 0xfc00707f +#define MATCH_VFWMACC_VF 0xf0005057 +#define MASK_VFWMACC_VF 0xfc00707f +#define MATCH_VFWNMACC_VF 0xf4005057 +#define MASK_VFWNMACC_VF 0xfc00707f +#define MATCH_VFWMSAC_VF 0xf8005057 +#define MASK_VFWMSAC_VF 0xfc00707f +#define MATCH_VFWNMSAC_VF 0xfc005057 +#define MASK_VFWNMSAC_VF 0xfc00707f +#define MATCH_VFADD_VV 0x1057 +#define MASK_VFADD_VV 0xfc00707f +#define MATCH_VFREDUSUM_VS 0x4001057 +#define MASK_VFREDUSUM_VS 0xfc00707f +#define MATCH_VFSUB_VV 0x8001057 +#define MASK_VFSUB_VV 0xfc00707f +#define MATCH_VFREDOSUM_VS 0xc001057 +#define MASK_VFREDOSUM_VS 0xfc00707f +#define MATCH_VFMIN_VV 0x10001057 +#define MASK_VFMIN_VV 0xfc00707f +#define MATCH_VFREDMIN_VS 0x14001057 +#define MASK_VFREDMIN_VS 0xfc00707f +#define MATCH_VFMAX_VV 0x18001057 +#define MASK_VFMAX_VV 0xfc00707f +#define MATCH_VFREDMAX_VS 0x1c001057 +#define MASK_VFREDMAX_VS 0xfc00707f +#define MATCH_VFSGNJ_VV 0x20001057 +#define MASK_VFSGNJ_VV 0xfc00707f +#define MATCH_VFSGNJN_VV 0x24001057 +#define MASK_VFSGNJN_VV 0xfc00707f +#define MATCH_VFSGNJX_VV 0x28001057 +#define MASK_VFSGNJX_VV 0xfc00707f +#define MATCH_VFMV_F_S 0x42001057 +#define MASK_VFMV_F_S 0xfe0ff07f +#define MATCH_VMFEQ_VV 0x60001057 +#define MASK_VMFEQ_VV 0xfc00707f +#define MATCH_VMFLE_VV 0x64001057 +#define MASK_VMFLE_VV 0xfc00707f +#define MATCH_VMFLT_VV 0x6c001057 +#define MASK_VMFLT_VV 0xfc00707f +#define MATCH_VMFNE_VV 0x70001057 +#define MASK_VMFNE_VV 0xfc00707f +#define MATCH_VFDIV_VV 0x80001057 +#define MASK_VFDIV_VV 0xfc00707f +#define MATCH_VFMUL_VV 0x90001057 +#define MASK_VFMUL_VV 0xfc00707f +#define MATCH_VFMADD_VV 0xa0001057 +#define MASK_VFMADD_VV 0xfc00707f +#define MATCH_VFNMADD_VV 0xa4001057 +#define MASK_VFNMADD_VV 0xfc00707f +#define MATCH_VFMSUB_VV 0xa8001057 +#define 
MASK_VFMSUB_VV 0xfc00707f +#define MATCH_VFNMSUB_VV 0xac001057 +#define MASK_VFNMSUB_VV 0xfc00707f +#define MATCH_VFMACC_VV 0xb0001057 +#define MASK_VFMACC_VV 0xfc00707f +#define MATCH_VFNMACC_VV 0xb4001057 +#define MASK_VFNMACC_VV 0xfc00707f +#define MATCH_VFMSAC_VV 0xb8001057 +#define MASK_VFMSAC_VV 0xfc00707f +#define MATCH_VFNMSAC_VV 0xbc001057 +#define MASK_VFNMSAC_VV 0xfc00707f +#define MATCH_VFCVT_XU_F_V 0x48001057 +#define MASK_VFCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFCVT_X_F_V 0x48009057 +#define MASK_VFCVT_X_F_V 0xfc0ff07f +#define MATCH_VFCVT_F_XU_V 0x48011057 +#define MASK_VFCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFCVT_F_X_V 0x48019057 +#define MASK_VFCVT_F_X_V 0xfc0ff07f +#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057 +#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f +#define MATCH_VFCVT_RTZ_X_F_V 0x48039057 +#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_XU_F_V 0x48041057 +#define MASK_VFWCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFWCVT_X_F_V 0x48049057 +#define MASK_VFWCVT_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_F_XU_V 0x48051057 +#define MASK_VFWCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFWCVT_F_X_V 0x48059057 +#define MASK_VFWCVT_F_X_V 0xfc0ff07f +#define MATCH_VFWCVT_F_F_V 0x48061057 +#define MASK_VFWCVT_F_F_V 0xfc0ff07f +#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057 +#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f +#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057 +#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f +#define MATCH_VFNCVT_XU_F_W 0x48081057 +#define MASK_VFNCVT_XU_F_W 0xfc0ff07f +#define MATCH_VFNCVT_X_F_W 0x48089057 +#define MASK_VFNCVT_X_F_W 0xfc0ff07f +#define MATCH_VFNCVT_F_XU_W 0x48091057 +#define MASK_VFNCVT_F_XU_W 0xfc0ff07f +#define MATCH_VFNCVT_F_X_W 0x48099057 +#define MASK_VFNCVT_F_X_W 0xfc0ff07f +#define MATCH_VFNCVT_F_F_W 0x480a1057 +#define MASK_VFNCVT_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057 +#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057 +#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f +#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057 +#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f +#define MATCH_VFSQRT_V 0x4c001057 +#define MASK_VFSQRT_V 0xfc0ff07f +#define MATCH_VFRSQRT7_V 0x4c021057 +#define MASK_VFRSQRT7_V 0xfc0ff07f +#define MATCH_VFREC7_V 0x4c029057 +#define MASK_VFREC7_V 0xfc0ff07f +#define MATCH_VFCLASS_V 0x4c081057 +#define MASK_VFCLASS_V 0xfc0ff07f +#define MATCH_VFWADD_VV 0xc0001057 +#define MASK_VFWADD_VV 0xfc00707f +#define MATCH_VFWREDUSUM_VS 0xc4001057 +#define MASK_VFWREDUSUM_VS 0xfc00707f +#define MATCH_VFWSUB_VV 0xc8001057 +#define MASK_VFWSUB_VV 0xfc00707f +#define MATCH_VFWREDOSUM_VS 0xcc001057 +#define MASK_VFWREDOSUM_VS 0xfc00707f +#define MATCH_VFWADD_WV 0xd0001057 +#define MASK_VFWADD_WV 0xfc00707f +#define MATCH_VFWSUB_WV 0xd8001057 +#define MASK_VFWSUB_WV 0xfc00707f +#define MATCH_VFWMUL_VV 0xe0001057 +#define MASK_VFWMUL_VV 0xfc00707f +#define MATCH_VFWMACC_VV 0xf0001057 +#define MASK_VFWMACC_VV 0xfc00707f +#define MATCH_VFWNMACC_VV 0xf4001057 +#define MASK_VFWNMACC_VV 0xfc00707f +#define MATCH_VFWMSAC_VV 0xf8001057 +#define MASK_VFWMSAC_VV 0xfc00707f +#define MATCH_VFWNMSAC_VV 0xfc001057 +#define MASK_VFWNMSAC_VV 0xfc00707f +#define MATCH_VADD_VX 0x4057 +#define MASK_VADD_VX 0xfc00707f +#define MATCH_VSUB_VX 0x8004057 +#define MASK_VSUB_VX 0xfc00707f +#define MATCH_VRSUB_VX 0xc004057 +#define MASK_VRSUB_VX 0xfc00707f +#define MATCH_VMINU_VX 0x10004057 +#define MASK_VMINU_VX 0xfc00707f +#define MATCH_VMIN_VX 0x14004057 +#define MASK_VMIN_VX 0xfc00707f +#define MATCH_VMAXU_VX 0x18004057 +#define MASK_VMAXU_VX 
0xfc00707f +#define MATCH_VMAX_VX 0x1c004057 +#define MASK_VMAX_VX 0xfc00707f +#define MATCH_VAND_VX 0x24004057 +#define MASK_VAND_VX 0xfc00707f +#define MATCH_VOR_VX 0x28004057 +#define MASK_VOR_VX 0xfc00707f +#define MATCH_VXOR_VX 0x2c004057 +#define MASK_VXOR_VX 0xfc00707f +#define MATCH_VRGATHER_VX 0x30004057 +#define MASK_VRGATHER_VX 0xfc00707f +#define MATCH_VSLIDEUP_VX 0x38004057 +#define MASK_VSLIDEUP_VX 0xfc00707f +#define MATCH_VSLIDEDOWN_VX 0x3c004057 +#define MASK_VSLIDEDOWN_VX 0xfc00707f +#define MATCH_VADC_VXM 0x40004057 +#define MASK_VADC_VXM 0xfe00707f +#define MATCH_VMADC_VXM 0x44004057 +#define MASK_VMADC_VXM 0xfe00707f +#define MATCH_VMADC_VX 0x46004057 +#define MASK_VMADC_VX 0xfe00707f +#define MATCH_VSBC_VXM 0x48004057 +#define MASK_VSBC_VXM 0xfe00707f +#define MATCH_VMSBC_VXM 0x4c004057 +#define MASK_VMSBC_VXM 0xfe00707f +#define MATCH_VMSBC_VX 0x4e004057 +#define MASK_VMSBC_VX 0xfe00707f +#define MATCH_VMERGE_VXM 0x5c004057 +#define MASK_VMERGE_VXM 0xfe00707f +#define MATCH_VMV_V_X 0x5e004057 +#define MASK_VMV_V_X 0xfff0707f +#define MATCH_VMSEQ_VX 0x60004057 +#define MASK_VMSEQ_VX 0xfc00707f +#define MATCH_VMSNE_VX 0x64004057 +#define MASK_VMSNE_VX 0xfc00707f +#define MATCH_VMSLTU_VX 0x68004057 +#define MASK_VMSLTU_VX 0xfc00707f +#define MATCH_VMSLT_VX 0x6c004057 +#define MASK_VMSLT_VX 0xfc00707f +#define MATCH_VMSLEU_VX 0x70004057 +#define MASK_VMSLEU_VX 0xfc00707f +#define MATCH_VMSLE_VX 0x74004057 +#define MASK_VMSLE_VX 0xfc00707f +#define MATCH_VMSGTU_VX 0x78004057 +#define MASK_VMSGTU_VX 0xfc00707f +#define MATCH_VMSGT_VX 0x7c004057 +#define MASK_VMSGT_VX 0xfc00707f +#define MATCH_VSADDU_VX 0x80004057 +#define MASK_VSADDU_VX 0xfc00707f +#define MATCH_VSADD_VX 0x84004057 +#define MASK_VSADD_VX 0xfc00707f +#define MATCH_VSSUBU_VX 0x88004057 +#define MASK_VSSUBU_VX 0xfc00707f +#define MATCH_VSSUB_VX 0x8c004057 +#define MASK_VSSUB_VX 0xfc00707f +#define MATCH_VSLL_VX 0x94004057 +#define MASK_VSLL_VX 0xfc00707f +#define MATCH_VSMUL_VX 0x9c004057 +#define MASK_VSMUL_VX 0xfc00707f +#define MATCH_VSRL_VX 0xa0004057 +#define MASK_VSRL_VX 0xfc00707f +#define MATCH_VSRA_VX 0xa4004057 +#define MASK_VSRA_VX 0xfc00707f +#define MATCH_VSSRL_VX 0xa8004057 +#define MASK_VSSRL_VX 0xfc00707f +#define MATCH_VSSRA_VX 0xac004057 +#define MASK_VSSRA_VX 0xfc00707f +#define MATCH_VNSRL_WX 0xb0004057 +#define MASK_VNSRL_WX 0xfc00707f +#define MATCH_VNSRA_WX 0xb4004057 +#define MASK_VNSRA_WX 0xfc00707f +#define MATCH_VNCLIPU_WX 0xb8004057 +#define MASK_VNCLIPU_WX 0xfc00707f +#define MATCH_VNCLIP_WX 0xbc004057 +#define MASK_VNCLIP_WX 0xfc00707f +#define MATCH_VADD_VV 0x57 +#define MASK_VADD_VV 0xfc00707f +#define MATCH_VSUB_VV 0x8000057 +#define MASK_VSUB_VV 0xfc00707f +#define MATCH_VMINU_VV 0x10000057 +#define MASK_VMINU_VV 0xfc00707f +#define MATCH_VMIN_VV 0x14000057 +#define MASK_VMIN_VV 0xfc00707f +#define MATCH_VMAXU_VV 0x18000057 +#define MASK_VMAXU_VV 0xfc00707f +#define MATCH_VMAX_VV 0x1c000057 +#define MASK_VMAX_VV 0xfc00707f +#define MATCH_VAND_VV 0x24000057 +#define MASK_VAND_VV 0xfc00707f +#define MATCH_VOR_VV 0x28000057 +#define MASK_VOR_VV 0xfc00707f +#define MATCH_VXOR_VV 0x2c000057 +#define MASK_VXOR_VV 0xfc00707f +#define MATCH_VRGATHER_VV 0x30000057 +#define MASK_VRGATHER_VV 0xfc00707f +#define MATCH_VRGATHEREI16_VV 0x38000057 +#define MASK_VRGATHEREI16_VV 0xfc00707f +#define MATCH_VADC_VVM 0x40000057 +#define MASK_VADC_VVM 0xfe00707f +#define MATCH_VMADC_VVM 0x44000057 +#define MASK_VMADC_VVM 0xfe00707f +#define MATCH_VMADC_VV 0x46000057 +#define MASK_VMADC_VV 
0xfe00707f +#define MATCH_VSBC_VVM 0x48000057 +#define MASK_VSBC_VVM 0xfe00707f +#define MATCH_VMSBC_VVM 0x4c000057 +#define MASK_VMSBC_VVM 0xfe00707f +#define MATCH_VMSBC_VV 0x4e000057 +#define MASK_VMSBC_VV 0xfe00707f +#define MATCH_VMERGE_VVM 0x5c000057 +#define MASK_VMERGE_VVM 0xfe00707f +#define MATCH_VMV_V_V 0x5e000057 +#define MASK_VMV_V_V 0xfff0707f +#define MATCH_VMSEQ_VV 0x60000057 +#define MASK_VMSEQ_VV 0xfc00707f +#define MATCH_VMSNE_VV 0x64000057 +#define MASK_VMSNE_VV 0xfc00707f +#define MATCH_VMSLTU_VV 0x68000057 +#define MASK_VMSLTU_VV 0xfc00707f +#define MATCH_VMSLT_VV 0x6c000057 +#define MASK_VMSLT_VV 0xfc00707f +#define MATCH_VMSLEU_VV 0x70000057 +#define MASK_VMSLEU_VV 0xfc00707f +#define MATCH_VMSLE_VV 0x74000057 +#define MASK_VMSLE_VV 0xfc00707f +#define MATCH_VSADDU_VV 0x80000057 +#define MASK_VSADDU_VV 0xfc00707f +#define MATCH_VSADD_VV 0x84000057 +#define MASK_VSADD_VV 0xfc00707f +#define MATCH_VSSUBU_VV 0x88000057 +#define MASK_VSSUBU_VV 0xfc00707f +#define MATCH_VSSUB_VV 0x8c000057 +#define MASK_VSSUB_VV 0xfc00707f +#define MATCH_VSLL_VV 0x94000057 +#define MASK_VSLL_VV 0xfc00707f +#define MATCH_VSMUL_VV 0x9c000057 +#define MASK_VSMUL_VV 0xfc00707f +#define MATCH_VSRL_VV 0xa0000057 +#define MASK_VSRL_VV 0xfc00707f +#define MATCH_VSRA_VV 0xa4000057 +#define MASK_VSRA_VV 0xfc00707f +#define MATCH_VSSRL_VV 0xa8000057 +#define MASK_VSSRL_VV 0xfc00707f +#define MATCH_VSSRA_VV 0xac000057 +#define MASK_VSSRA_VV 0xfc00707f +#define MATCH_VNSRL_WV 0xb0000057 +#define MASK_VNSRL_WV 0xfc00707f +#define MATCH_VNSRA_WV 0xb4000057 +#define MASK_VNSRA_WV 0xfc00707f +#define MATCH_VNCLIPU_WV 0xb8000057 +#define MASK_VNCLIPU_WV 0xfc00707f +#define MATCH_VNCLIP_WV 0xbc000057 +#define MASK_VNCLIP_WV 0xfc00707f +#define MATCH_VWREDSUMU_VS 0xc0000057 +#define MASK_VWREDSUMU_VS 0xfc00707f +#define MATCH_VWREDSUM_VS 0xc4000057 +#define MASK_VWREDSUM_VS 0xfc00707f +#define MATCH_VADD_VI 0x3057 +#define MASK_VADD_VI 0xfc00707f +#define MATCH_VRSUB_VI 0xc003057 +#define MASK_VRSUB_VI 0xfc00707f +#define MATCH_VAND_VI 0x24003057 +#define MASK_VAND_VI 0xfc00707f +#define MATCH_VOR_VI 0x28003057 +#define MASK_VOR_VI 0xfc00707f +#define MATCH_VXOR_VI 0x2c003057 +#define MASK_VXOR_VI 0xfc00707f +#define MATCH_VRGATHER_VI 0x30003057 +#define MASK_VRGATHER_VI 0xfc00707f +#define MATCH_VSLIDEUP_VI 0x38003057 +#define MASK_VSLIDEUP_VI 0xfc00707f +#define MATCH_VSLIDEDOWN_VI 0x3c003057 +#define MASK_VSLIDEDOWN_VI 0xfc00707f +#define MATCH_VADC_VIM 0x40003057 +#define MASK_VADC_VIM 0xfe00707f +#define MATCH_VMADC_VIM 0x44003057 +#define MASK_VMADC_VIM 0xfe00707f +#define MATCH_VMADC_VI 0x46003057 +#define MASK_VMADC_VI 0xfe00707f +#define MATCH_VMERGE_VIM 0x5c003057 +#define MASK_VMERGE_VIM 0xfe00707f +#define MATCH_VMV_V_I 0x5e003057 +#define MASK_VMV_V_I 0xfff0707f +#define MATCH_VMSEQ_VI 0x60003057 +#define MASK_VMSEQ_VI 0xfc00707f +#define MATCH_VMSNE_VI 0x64003057 +#define MASK_VMSNE_VI 0xfc00707f +#define MATCH_VMSLEU_VI 0x70003057 +#define MASK_VMSLEU_VI 0xfc00707f +#define MATCH_VMSLE_VI 0x74003057 +#define MASK_VMSLE_VI 0xfc00707f +#define MATCH_VMSGTU_VI 0x78003057 +#define MASK_VMSGTU_VI 0xfc00707f +#define MATCH_VMSGT_VI 0x7c003057 +#define MASK_VMSGT_VI 0xfc00707f +#define MATCH_VSADDU_VI 0x80003057 +#define MASK_VSADDU_VI 0xfc00707f +#define MATCH_VSADD_VI 0x84003057 +#define MASK_VSADD_VI 0xfc00707f +#define MATCH_VSLL_VI 0x94003057 +#define MASK_VSLL_VI 0xfc00707f +#define MATCH_VMV1R_V 0x9e003057 +#define MASK_VMV1R_V 0xfe0ff07f +#define MATCH_VMV2R_V 0x9e00b057 +#define 
MASK_VMV2R_V 0xfe0ff07f +#define MATCH_VMV4R_V 0x9e01b057 +#define MASK_VMV4R_V 0xfe0ff07f +#define MATCH_VMV8R_V 0x9e03b057 +#define MASK_VMV8R_V 0xfe0ff07f +#define MATCH_VSRL_VI 0xa0003057 +#define MASK_VSRL_VI 0xfc00707f +#define MATCH_VSRA_VI 0xa4003057 +#define MASK_VSRA_VI 0xfc00707f +#define MATCH_VSSRL_VI 0xa8003057 +#define MASK_VSSRL_VI 0xfc00707f +#define MATCH_VSSRA_VI 0xac003057 +#define MASK_VSSRA_VI 0xfc00707f +#define MATCH_VNSRL_WI 0xb0003057 +#define MASK_VNSRL_WI 0xfc00707f +#define MATCH_VNSRA_WI 0xb4003057 +#define MASK_VNSRA_WI 0xfc00707f +#define MATCH_VNCLIPU_WI 0xb8003057 +#define MASK_VNCLIPU_WI 0xfc00707f +#define MATCH_VNCLIP_WI 0xbc003057 +#define MASK_VNCLIP_WI 0xfc00707f +#define MATCH_VREDSUM_VS 0x2057 +#define MASK_VREDSUM_VS 0xfc00707f +#define MATCH_VREDAND_VS 0x4002057 +#define MASK_VREDAND_VS 0xfc00707f +#define MATCH_VREDOR_VS 0x8002057 +#define MASK_VREDOR_VS 0xfc00707f +#define MATCH_VREDXOR_VS 0xc002057 +#define MASK_VREDXOR_VS 0xfc00707f +#define MATCH_VREDMINU_VS 0x10002057 +#define MASK_VREDMINU_VS 0xfc00707f +#define MATCH_VREDMIN_VS 0x14002057 +#define MASK_VREDMIN_VS 0xfc00707f +#define MATCH_VREDMAXU_VS 0x18002057 +#define MASK_VREDMAXU_VS 0xfc00707f +#define MATCH_VREDMAX_VS 0x1c002057 +#define MASK_VREDMAX_VS 0xfc00707f +#define MATCH_VAADDU_VV 0x20002057 +#define MASK_VAADDU_VV 0xfc00707f +#define MATCH_VAADD_VV 0x24002057 +#define MASK_VAADD_VV 0xfc00707f +#define MATCH_VASUBU_VV 0x28002057 +#define MASK_VASUBU_VV 0xfc00707f +#define MATCH_VASUB_VV 0x2c002057 +#define MASK_VASUB_VV 0xfc00707f +#define MATCH_VMV_X_S 0x42002057 +#define MASK_VMV_X_S 0xfe0ff07f +#define MATCH_VZEXT_VF8 0x48012057 +#define MASK_VZEXT_VF8 0xfc0ff07f +#define MATCH_VSEXT_VF8 0x4801a057 +#define MASK_VSEXT_VF8 0xfc0ff07f +#define MATCH_VZEXT_VF4 0x48022057 +#define MASK_VZEXT_VF4 0xfc0ff07f +#define MATCH_VSEXT_VF4 0x4802a057 +#define MASK_VSEXT_VF4 0xfc0ff07f +#define MATCH_VZEXT_VF2 0x48032057 +#define MASK_VZEXT_VF2 0xfc0ff07f +#define MATCH_VSEXT_VF2 0x4803a057 +#define MASK_VSEXT_VF2 0xfc0ff07f +#define MATCH_VCOMPRESS_VM 0x5e002057 +#define MASK_VCOMPRESS_VM 0xfe00707f +#define MATCH_VMANDN_MM 0x60002057 +#define MASK_VMANDN_MM 0xfc00707f +#define MATCH_VMAND_MM 0x64002057 +#define MASK_VMAND_MM 0xfc00707f +#define MATCH_VMOR_MM 0x68002057 +#define MASK_VMOR_MM 0xfc00707f +#define MATCH_VMXOR_MM 0x6c002057 +#define MASK_VMXOR_MM 0xfc00707f +#define MATCH_VMORN_MM 0x70002057 +#define MASK_VMORN_MM 0xfc00707f +#define MATCH_VMNAND_MM 0x74002057 +#define MASK_VMNAND_MM 0xfc00707f +#define MATCH_VMNOR_MM 0x78002057 +#define MASK_VMNOR_MM 0xfc00707f +#define MATCH_VMXNOR_MM 0x7c002057 +#define MASK_VMXNOR_MM 0xfc00707f +#define MATCH_VMSBF_M 0x5000a057 +#define MASK_VMSBF_M 0xfc0ff07f +#define MATCH_VMSOF_M 0x50012057 +#define MASK_VMSOF_M 0xfc0ff07f +#define MATCH_VMSIF_M 0x5001a057 +#define MASK_VMSIF_M 0xfc0ff07f +#define MATCH_VIOTA_M 0x50082057 +#define MASK_VIOTA_M 0xfc0ff07f +#define MATCH_VID_V 0x5008a057 +#define MASK_VID_V 0xfdfff07f +#define MATCH_VCPOP_M 0x40082057 +#define MASK_VCPOP_M 0xfc0ff07f +#define MATCH_VFIRST_M 0x4008a057 +#define MASK_VFIRST_M 0xfc0ff07f +#define MATCH_VDIVU_VV 0x80002057 +#define MASK_VDIVU_VV 0xfc00707f +#define MATCH_VDIV_VV 0x84002057 +#define MASK_VDIV_VV 0xfc00707f +#define MATCH_VREMU_VV 0x88002057 +#define MASK_VREMU_VV 0xfc00707f +#define MATCH_VREM_VV 0x8c002057 +#define MASK_VREM_VV 0xfc00707f +#define MATCH_VMULHU_VV 0x90002057 +#define MASK_VMULHU_VV 0xfc00707f +#define MATCH_VMUL_VV 0x94002057 +#define 
MASK_VMUL_VV 0xfc00707f +#define MATCH_VMULHSU_VV 0x98002057 +#define MASK_VMULHSU_VV 0xfc00707f +#define MATCH_VMULH_VV 0x9c002057 +#define MASK_VMULH_VV 0xfc00707f +#define MATCH_VMADD_VV 0xa4002057 +#define MASK_VMADD_VV 0xfc00707f +#define MATCH_VNMSUB_VV 0xac002057 +#define MASK_VNMSUB_VV 0xfc00707f +#define MATCH_VMACC_VV 0xb4002057 +#define MASK_VMACC_VV 0xfc00707f +#define MATCH_VNMSAC_VV 0xbc002057 +#define MASK_VNMSAC_VV 0xfc00707f +#define MATCH_VWADDU_VV 0xc0002057 +#define MASK_VWADDU_VV 0xfc00707f +#define MATCH_VWADD_VV 0xc4002057 +#define MASK_VWADD_VV 0xfc00707f +#define MATCH_VWSUBU_VV 0xc8002057 +#define MASK_VWSUBU_VV 0xfc00707f +#define MATCH_VWSUB_VV 0xcc002057 +#define MASK_VWSUB_VV 0xfc00707f +#define MATCH_VWADDU_WV 0xd0002057 +#define MASK_VWADDU_WV 0xfc00707f +#define MATCH_VWADD_WV 0xd4002057 +#define MASK_VWADD_WV 0xfc00707f +#define MATCH_VWSUBU_WV 0xd8002057 +#define MASK_VWSUBU_WV 0xfc00707f +#define MATCH_VWSUB_WV 0xdc002057 +#define MASK_VWSUB_WV 0xfc00707f +#define MATCH_VWMULU_VV 0xe0002057 +#define MASK_VWMULU_VV 0xfc00707f +#define MATCH_VWMULSU_VV 0xe8002057 +#define MASK_VWMULSU_VV 0xfc00707f +#define MATCH_VWMUL_VV 0xec002057 +#define MASK_VWMUL_VV 0xfc00707f +#define MATCH_VWMACCU_VV 0xf0002057 +#define MASK_VWMACCU_VV 0xfc00707f +#define MATCH_VWMACC_VV 0xf4002057 +#define MASK_VWMACC_VV 0xfc00707f +#define MATCH_VWMACCSU_VV 0xfc002057 +#define MASK_VWMACCSU_VV 0xfc00707f +#define MATCH_VAADDU_VX 0x20006057 +#define MASK_VAADDU_VX 0xfc00707f +#define MATCH_VAADD_VX 0x24006057 +#define MASK_VAADD_VX 0xfc00707f +#define MATCH_VASUBU_VX 0x28006057 +#define MASK_VASUBU_VX 0xfc00707f +#define MATCH_VASUB_VX 0x2c006057 +#define MASK_VASUB_VX 0xfc00707f +#define MATCH_VMV_S_X 0x42006057 +#define MASK_VMV_S_X 0xfff0707f +#define MATCH_VSLIDE1UP_VX 0x38006057 +#define MASK_VSLIDE1UP_VX 0xfc00707f +#define MATCH_VSLIDE1DOWN_VX 0x3c006057 +#define MASK_VSLIDE1DOWN_VX 0xfc00707f +#define MATCH_VDIVU_VX 0x80006057 +#define MASK_VDIVU_VX 0xfc00707f +#define MATCH_VDIV_VX 0x84006057 +#define MASK_VDIV_VX 0xfc00707f +#define MATCH_VREMU_VX 0x88006057 +#define MASK_VREMU_VX 0xfc00707f +#define MATCH_VREM_VX 0x8c006057 +#define MASK_VREM_VX 0xfc00707f +#define MATCH_VMULHU_VX 0x90006057 +#define MASK_VMULHU_VX 0xfc00707f +#define MATCH_VMUL_VX 0x94006057 +#define MASK_VMUL_VX 0xfc00707f +#define MATCH_VMULHSU_VX 0x98006057 +#define MASK_VMULHSU_VX 0xfc00707f +#define MATCH_VMULH_VX 0x9c006057 +#define MASK_VMULH_VX 0xfc00707f +#define MATCH_VMADD_VX 0xa4006057 +#define MASK_VMADD_VX 0xfc00707f +#define MATCH_VNMSUB_VX 0xac006057 +#define MASK_VNMSUB_VX 0xfc00707f +#define MATCH_VMACC_VX 0xb4006057 +#define MASK_VMACC_VX 0xfc00707f +#define MATCH_VNMSAC_VX 0xbc006057 +#define MASK_VNMSAC_VX 0xfc00707f +#define MATCH_VWADDU_VX 0xc0006057 +#define MASK_VWADDU_VX 0xfc00707f +#define MATCH_VWADD_VX 0xc4006057 +#define MASK_VWADD_VX 0xfc00707f +#define MATCH_VWSUBU_VX 0xc8006057 +#define MASK_VWSUBU_VX 0xfc00707f +#define MATCH_VWSUB_VX 0xcc006057 +#define MASK_VWSUB_VX 0xfc00707f +#define MATCH_VWADDU_WX 0xd0006057 +#define MASK_VWADDU_WX 0xfc00707f +#define MATCH_VWADD_WX 0xd4006057 +#define MASK_VWADD_WX 0xfc00707f +#define MATCH_VWSUBU_WX 0xd8006057 +#define MASK_VWSUBU_WX 0xfc00707f +#define MATCH_VWSUB_WX 0xdc006057 +#define MASK_VWSUB_WX 0xfc00707f +#define MATCH_VWMULU_VX 0xe0006057 +#define MASK_VWMULU_VX 0xfc00707f +#define MATCH_VWMULSU_VX 0xe8006057 +#define MASK_VWMULSU_VX 0xfc00707f +#define MATCH_VWMUL_VX 0xec006057 +#define MASK_VWMUL_VX 0xfc00707f 
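[Editorial note: taken together, these pairs are exactly what a table-driven disassembler needs: scan a (mask, match, name) table and report the first hit. A hedged sketch under the same assumption that the macros are visible; the table names only a few of the vector opcodes above for brevity:

#include <stddef.h>
#include <stdint.h>

struct opcode { uint32_t mask, match; const char *name; };

/* Where encodings overlap, the more specific masks must come first. */
static const struct opcode optable[] = {
  { MASK_VSETVL,   MATCH_VSETVL,   "vsetvl"   },
  { MASK_VSETIVLI, MATCH_VSETIVLI, "vsetivli" },
  { MASK_VSETVLI,  MATCH_VSETVLI,  "vsetvli"  },
  { MASK_VADD_VV,  MATCH_VADD_VV,  "vadd.vv"  },
  { MASK_VADD_VX,  MATCH_VADD_VX,  "vadd.vx"  },
};

static const char *decode(uint32_t insn)
{
  for (size_t i = 0; i < sizeof optable / sizeof optable[0]; i++)
    if ((insn & optable[i].mask) == optable[i].match)
      return optable[i].name;
  return "unknown";
}]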
+#define MATCH_VWMACCU_VX 0xf0006057 +#define MASK_VWMACCU_VX 0xfc00707f +#define MATCH_VWMACC_VX 0xf4006057 +#define MASK_VWMACC_VX 0xfc00707f +#define MATCH_VWMACCUS_VX 0xf8006057 +#define MASK_VWMACCUS_VX 0xfc00707f +#define MATCH_VWMACCSU_VX 0xfc006057 +#define MASK_VWMACCSU_VX 0xfc00707f +#define MATCH_VAMOSWAPEI8_V 0x800002f +#define MASK_VAMOSWAPEI8_V 0xf800707f +#define MATCH_VAMOADDEI8_V 0x2f +#define MASK_VAMOADDEI8_V 0xf800707f +#define MATCH_VAMOXOREI8_V 0x2000002f +#define MASK_VAMOXOREI8_V 0xf800707f +#define MATCH_VAMOANDEI8_V 0x6000002f +#define MASK_VAMOANDEI8_V 0xf800707f +#define MATCH_VAMOOREI8_V 0x4000002f +#define MASK_VAMOOREI8_V 0xf800707f +#define MATCH_VAMOMINEI8_V 0x8000002f +#define MASK_VAMOMINEI8_V 0xf800707f +#define MATCH_VAMOMAXEI8_V 0xa000002f +#define MASK_VAMOMAXEI8_V 0xf800707f +#define MATCH_VAMOMINUEI8_V 0xc000002f +#define MASK_VAMOMINUEI8_V 0xf800707f +#define MATCH_VAMOMAXUEI8_V 0xe000002f +#define MASK_VAMOMAXUEI8_V 0xf800707f +#define MATCH_VAMOSWAPEI16_V 0x800502f +#define MASK_VAMOSWAPEI16_V 0xf800707f +#define MATCH_VAMOADDEI16_V 0x502f +#define MASK_VAMOADDEI16_V 0xf800707f +#define MATCH_VAMOXOREI16_V 0x2000502f +#define MASK_VAMOXOREI16_V 0xf800707f +#define MATCH_VAMOANDEI16_V 0x6000502f +#define MASK_VAMOANDEI16_V 0xf800707f +#define MATCH_VAMOOREI16_V 0x4000502f +#define MASK_VAMOOREI16_V 0xf800707f +#define MATCH_VAMOMINEI16_V 0x8000502f +#define MASK_VAMOMINEI16_V 0xf800707f +#define MATCH_VAMOMAXEI16_V 0xa000502f +#define MASK_VAMOMAXEI16_V 0xf800707f +#define MATCH_VAMOMINUEI16_V 0xc000502f +#define MASK_VAMOMINUEI16_V 0xf800707f +#define MATCH_VAMOMAXUEI16_V 0xe000502f +#define MASK_VAMOMAXUEI16_V 0xf800707f +#define MATCH_VAMOSWAPEI32_V 0x800602f +#define MASK_VAMOSWAPEI32_V 0xf800707f +#define MATCH_VAMOADDEI32_V 0x602f +#define MASK_VAMOADDEI32_V 0xf800707f +#define MATCH_VAMOXOREI32_V 0x2000602f +#define MASK_VAMOXOREI32_V 0xf800707f +#define MATCH_VAMOANDEI32_V 0x6000602f +#define MASK_VAMOANDEI32_V 0xf800707f +#define MATCH_VAMOOREI32_V 0x4000602f +#define MASK_VAMOOREI32_V 0xf800707f +#define MATCH_VAMOMINEI32_V 0x8000602f +#define MASK_VAMOMINEI32_V 0xf800707f +#define MATCH_VAMOMAXEI32_V 0xa000602f +#define MASK_VAMOMAXEI32_V 0xf800707f +#define MATCH_VAMOMINUEI32_V 0xc000602f +#define MASK_VAMOMINUEI32_V 0xf800707f +#define MATCH_VAMOMAXUEI32_V 0xe000602f +#define MASK_VAMOMAXUEI32_V 0xf800707f +#define MATCH_VAMOSWAPEI64_V 0x800702f +#define MASK_VAMOSWAPEI64_V 0xf800707f +#define MATCH_VAMOADDEI64_V 0x702f +#define MASK_VAMOADDEI64_V 0xf800707f +#define MATCH_VAMOXOREI64_V 0x2000702f +#define MASK_VAMOXOREI64_V 0xf800707f +#define MATCH_VAMOANDEI64_V 0x6000702f +#define MASK_VAMOANDEI64_V 0xf800707f +#define MATCH_VAMOOREI64_V 0x4000702f +#define MASK_VAMOOREI64_V 0xf800707f +#define MATCH_VAMOMINEI64_V 0x8000702f +#define MASK_VAMOMINEI64_V 0xf800707f +#define MATCH_VAMOMAXEI64_V 0xa000702f +#define MASK_VAMOMAXEI64_V 0xf800707f +#define MATCH_VAMOMINUEI64_V 0xc000702f +#define MASK_VAMOMINUEI64_V 0xf800707f +#define MATCH_VAMOMAXUEI64_V 0xe000702f +#define MASK_VAMOMAXUEI64_V 0xf800707f +#define MATCH_ADD8 0x48000077 +#define MASK_ADD8 0xfe00707f +#define MATCH_ADD16 0x40000077 +#define MASK_ADD16 0xfe00707f +#define MATCH_ADD64 0xc0001077 +#define MASK_ADD64 0xfe00707f +#define MATCH_AVE 0xe0000077 +#define MASK_AVE 0xfe00707f +#define MATCH_BITREV 0xe6000077 +#define MASK_BITREV 0xfe00707f +#define MATCH_BITREVI 0xe8000077 +#define MASK_BITREVI 0xfc00707f +#define MATCH_BPICK 0x3077 +#define MASK_BPICK 0x600707f 
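[Editorial note: matching only classifies the instruction; the operand bits are the ones each mask zeroes out. In the base 32-bit formats the register fields sit at fixed offsets, so extraction is a shift and a mask. An editorial sketch, offsets per the base ISA instruction formats:

#include <stdint.h>

static inline unsigned rd(uint32_t insn)  { return (insn >> 7)  & 0x1f; }  /* bits 11:7  */
static inline unsigned rs1(uint32_t insn) { return (insn >> 15) & 0x1f; }  /* bits 19:15 */
static inline unsigned rs2(uint32_t insn) { return (insn >> 20) & 0x1f; }  /* bits 24:20 */]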
+#define MATCH_CLRS8 0xae000077 +#define MASK_CLRS8 0xfff0707f +#define MATCH_CLRS16 0xae800077 +#define MASK_CLRS16 0xfff0707f +#define MATCH_CLRS32 0xaf800077 +#define MASK_CLRS32 0xfff0707f +#define MATCH_CLO8 0xae300077 +#define MASK_CLO8 0xfff0707f +#define MATCH_CLO16 0xaeb00077 +#define MASK_CLO16 0xfff0707f +#define MATCH_CLO32 0xafb00077 +#define MASK_CLO32 0xfff0707f +#define MATCH_CLZ8 0xae100077 +#define MASK_CLZ8 0xfff0707f +#define MATCH_CLZ16 0xae900077 +#define MASK_CLZ16 0xfff0707f +#define MATCH_CLZ32 0xaf900077 +#define MASK_CLZ32 0xfff0707f +#define MATCH_CMPEQ8 0x4e000077 +#define MASK_CMPEQ8 0xfe00707f +#define MATCH_CMPEQ16 0x4c000077 +#define MASK_CMPEQ16 0xfe00707f +#define MATCH_CRAS16 0x44000077 +#define MASK_CRAS16 0xfe00707f +#define MATCH_CRSA16 0x46000077 +#define MASK_CRSA16 0xfe00707f +#define MATCH_INSB 0xac000077 +#define MASK_INSB 0xff80707f +#define MATCH_KABS8 0xad000077 +#define MASK_KABS8 0xfff0707f +#define MATCH_KABS16 0xad100077 +#define MASK_KABS16 0xfff0707f +#define MATCH_KABSW 0xad400077 +#define MASK_KABSW 0xfff0707f +#define MATCH_KADD8 0x18000077 +#define MASK_KADD8 0xfe00707f +#define MATCH_KADD16 0x10000077 +#define MASK_KADD16 0xfe00707f +#define MATCH_KADD64 0x90001077 +#define MASK_KADD64 0xfe00707f +#define MATCH_KADDH 0x4001077 +#define MASK_KADDH 0xfe00707f +#define MATCH_KADDW 0x1077 +#define MASK_KADDW 0xfe00707f +#define MATCH_KCRAS16 0x14000077 +#define MASK_KCRAS16 0xfe00707f +#define MATCH_KCRSA16 0x16000077 +#define MASK_KCRSA16 0xfe00707f +#define MATCH_KDMBB 0xa001077 +#define MASK_KDMBB 0xfe00707f +#define MATCH_KDMBT 0x1a001077 +#define MASK_KDMBT 0xfe00707f +#define MATCH_KDMTT 0x2a001077 +#define MASK_KDMTT 0xfe00707f +#define MATCH_KDMABB 0xd2001077 +#define MASK_KDMABB 0xfe00707f +#define MATCH_KDMABT 0xe2001077 +#define MASK_KDMABT 0xfe00707f +#define MATCH_KDMATT 0xf2001077 +#define MASK_KDMATT 0xfe00707f +#define MATCH_KHM8 0x8e000077 +#define MASK_KHM8 0xfe00707f +#define MATCH_KHMX8 0x9e000077 +#define MASK_KHMX8 0xfe00707f +#define MATCH_KHM16 0x86000077 +#define MASK_KHM16 0xfe00707f +#define MATCH_KHMX16 0x96000077 +#define MASK_KHMX16 0xfe00707f +#define MATCH_KHMBB 0xc001077 +#define MASK_KHMBB 0xfe00707f +#define MATCH_KHMBT 0x1c001077 +#define MASK_KHMBT 0xfe00707f +#define MATCH_KHMTT 0x2c001077 +#define MASK_KHMTT 0xfe00707f +#define MATCH_KMABB 0x5a001077 +#define MASK_KMABB 0xfe00707f +#define MATCH_KMABT 0x6a001077 +#define MASK_KMABT 0xfe00707f +#define MATCH_KMATT 0x7a001077 +#define MASK_KMATT 0xfe00707f +#define MATCH_KMADA 0x48001077 +#define MASK_KMADA 0xfe00707f +#define MATCH_KMAXDA 0x4a001077 +#define MASK_KMAXDA 0xfe00707f +#define MATCH_KMADS 0x5c001077 +#define MASK_KMADS 0xfe00707f +#define MATCH_KMADRS 0x6c001077 +#define MASK_KMADRS 0xfe00707f +#define MATCH_KMAXDS 0x7c001077 +#define MASK_KMAXDS 0xfe00707f +#define MATCH_KMAR64 0x94001077 +#define MASK_KMAR64 0xfe00707f +#define MATCH_KMDA 0x38001077 +#define MASK_KMDA 0xfe00707f +#define MATCH_KMXDA 0x3a001077 +#define MASK_KMXDA 0xfe00707f +#define MATCH_KMMAC 0x60001077 +#define MASK_KMMAC 0xfe00707f +#define MATCH_KMMAC_U 0x70001077 +#define MASK_KMMAC_U 0xfe00707f +#define MATCH_KMMAWB 0x46001077 +#define MASK_KMMAWB 0xfe00707f +#define MATCH_KMMAWB_U 0x56001077 +#define MASK_KMMAWB_U 0xfe00707f +#define MATCH_KMMAWB2 0xce001077 +#define MASK_KMMAWB2 0xfe00707f +#define MATCH_KMMAWB2_U 0xde001077 +#define MASK_KMMAWB2_U 0xfe00707f +#define MATCH_KMMAWT 0x66001077 +#define MASK_KMMAWT 0xfe00707f +#define MATCH_KMMAWT_U 0x76001077 
+#define MASK_KMMAWT_U 0xfe00707f
+#define MATCH_KMMAWT2 0xee001077
+#define MASK_KMMAWT2 0xfe00707f
+#define MATCH_KMMAWT2_U 0xfe001077
+#define MASK_KMMAWT2_U 0xfe00707f
+#define MATCH_KMMSB 0x42001077
+#define MASK_KMMSB 0xfe00707f
+#define MATCH_KMMSB_U 0x52001077
+#define MASK_KMMSB_U 0xfe00707f
+#define MATCH_KMMWB2 0x8e001077
+#define MASK_KMMWB2 0xfe00707f
+#define MATCH_KMMWB2_U 0x9e001077
+#define MASK_KMMWB2_U 0xfe00707f
+#define MATCH_KMMWT2 0xae001077
+#define MASK_KMMWT2 0xfe00707f
+#define MATCH_KMMWT2_U 0xbe001077
+#define MASK_KMMWT2_U 0xfe00707f
+#define MATCH_KMSDA 0x4c001077
+#define MASK_KMSDA 0xfe00707f
+#define MATCH_KMSXDA 0x4e001077
+#define MASK_KMSXDA 0xfe00707f
+#define MATCH_KMSR64 0x96001077
+#define MASK_KMSR64 0xfe00707f
+#define MATCH_KSLLW 0x26001077
+#define MASK_KSLLW 0xfe00707f
+#define MATCH_KSLLIW 0x36001077
+#define MASK_KSLLIW 0xfe00707f
+#define MATCH_KSLL8 0x6c000077
+#define MASK_KSLL8 0xfe00707f
+#define MATCH_KSLLI8 0x7c800077
+#define MASK_KSLLI8 0xff80707f
+#define MATCH_KSLL16 0x64000077
+#define MASK_KSLL16 0xfe00707f
+#define MATCH_KSLLI16 0x75000077
+#define MASK_KSLLI16 0xff00707f
+#define MATCH_KSLRA8 0x5e000077
+#define MASK_KSLRA8 0xfe00707f
+#define MATCH_KSLRA8_U 0x6e000077
+#define MASK_KSLRA8_U 0xfe00707f
+#define MATCH_KSLRA16 0x56000077
+#define MASK_KSLRA16 0xfe00707f
+#define MATCH_KSLRA16_U 0x66000077
+#define MASK_KSLRA16_U 0xfe00707f
+#define MATCH_KSLRAW 0x6e001077
+#define MASK_KSLRAW 0xfe00707f
+#define MATCH_KSLRAW_U 0x7e001077
+#define MASK_KSLRAW_U 0xfe00707f
+#define MATCH_KSTAS16 0xc4002077
+#define MASK_KSTAS16 0xfe00707f
+#define MATCH_KSTSA16 0xc6002077
+#define MASK_KSTSA16 0xfe00707f
+#define MATCH_KSUB8 0x1a000077
+#define MASK_KSUB8 0xfe00707f
+#define MATCH_KSUB16 0x12000077
+#define MASK_KSUB16 0xfe00707f
+#define MATCH_KSUB64 0x92001077
+#define MASK_KSUB64 0xfe00707f
+#define MATCH_KSUBH 0x6001077
+#define MASK_KSUBH 0xfe00707f
+#define MATCH_KSUBW 0x2001077
+#define MASK_KSUBW 0xfe00707f
+#define MATCH_KWMMUL 0x62001077
+#define MASK_KWMMUL 0xfe00707f
+#define MATCH_KWMMUL_U 0x72001077
+#define MASK_KWMMUL_U 0xfe00707f
+#define MATCH_MADDR32 0xc4001077
+#define MASK_MADDR32 0xfe00707f
+#define MATCH_MAXW 0xf2000077
+#define MASK_MAXW 0xfe00707f
+#define MATCH_MINW 0xf0000077
+#define MASK_MINW 0xfe00707f
+#define MATCH_MSUBR32 0xc6001077
+#define MASK_MSUBR32 0xfe00707f
+#define MATCH_MULR64 0xf0001077
+#define MASK_MULR64 0xfe00707f
+#define MATCH_MULSR64 0xe0001077
+#define MASK_MULSR64 0xfe00707f
+#define MATCH_PBSAD 0xfc000077
+#define MASK_PBSAD 0xfe00707f
+#define MATCH_PBSADA 0xfe000077
+#define MASK_PBSADA 0xfe00707f
+#define MATCH_PKBB16 0xe001077
+#define MASK_PKBB16 0xfe00707f
+#define MATCH_PKBT16 0x1e001077
+#define MASK_PKBT16 0xfe00707f
+#define MATCH_PKTT16 0x2e001077
+#define MASK_PKTT16 0xfe00707f
+#define MATCH_PKTB16 0x3e001077
+#define MASK_PKTB16 0xfe00707f
+#define MATCH_RADD8 0x8000077
+#define MASK_RADD8 0xfe00707f
+#define MATCH_RADD16 0x77
+#define MASK_RADD16 0xfe00707f
+#define MATCH_RADD64 0x80001077
+#define MASK_RADD64 0xfe00707f
+#define MATCH_RADDW 0x20001077
+#define MASK_RADDW 0xfe00707f
+#define MATCH_RCRAS16 0x4000077
+#define MASK_RCRAS16 0xfe00707f
+#define MATCH_RCRSA16 0x6000077
+#define MASK_RCRSA16 0xfe00707f
+#define MATCH_RSTAS16 0xb4002077
+#define MASK_RSTAS16 0xfe00707f
+#define MATCH_RSTSA16 0xb6002077
+#define MASK_RSTSA16 0xfe00707f
+#define MATCH_RSUB8 0xa000077
+#define MASK_RSUB8 0xfe00707f
+#define MATCH_RSUB16 0x2000077
+#define MASK_RSUB16 0xfe00707f
+#define MATCH_RSUB64 0x82001077
+#define MASK_RSUB64 0xfe00707f
+#define MATCH_RSUBW 0x22001077
+#define MASK_RSUBW 0xfe00707f
+#define MATCH_SCLIP8 0x8c000077
+#define MASK_SCLIP8 0xff80707f
+#define MATCH_SCLIP16 0x84000077
+#define MASK_SCLIP16 0xff00707f
+#define MATCH_SCLIP32 0xe4000077
+#define MASK_SCLIP32 0xfe00707f
+#define MATCH_SCMPLE8 0x1e000077
+#define MASK_SCMPLE8 0xfe00707f
+#define MATCH_SCMPLE16 0x1c000077
+#define MASK_SCMPLE16 0xfe00707f
+#define MATCH_SCMPLT8 0xe000077
+#define MASK_SCMPLT8 0xfe00707f
+#define MATCH_SCMPLT16 0xc000077
+#define MASK_SCMPLT16 0xfe00707f
+#define MATCH_SLL8 0x5c000077
+#define MASK_SLL8 0xfe00707f
+#define MATCH_SLLI8 0x7c000077
+#define MASK_SLLI8 0xff80707f
+#define MATCH_SLL16 0x54000077
+#define MASK_SLL16 0xfe00707f
+#define MATCH_SLLI16 0x74000077
+#define MASK_SLLI16 0xff00707f
+#define MATCH_SMAL 0x5e001077
+#define MASK_SMAL 0xfe00707f
+#define MATCH_SMALBB 0x88001077
+#define MASK_SMALBB 0xfe00707f
+#define MATCH_SMALBT 0x98001077
+#define MASK_SMALBT 0xfe00707f
+#define MATCH_SMALTT 0xa8001077
+#define MASK_SMALTT 0xfe00707f
+#define MATCH_SMALDA 0x8c001077
+#define MASK_SMALDA 0xfe00707f
+#define MATCH_SMALXDA 0x9c001077
+#define MASK_SMALXDA 0xfe00707f
+#define MATCH_SMALDS 0x8a001077
+#define MASK_SMALDS 0xfe00707f
+#define MATCH_SMALDRS 0x9a001077
+#define MASK_SMALDRS 0xfe00707f
+#define MATCH_SMALXDS 0xaa001077
+#define MASK_SMALXDS 0xfe00707f
+#define MATCH_SMAR64 0x84001077
+#define MASK_SMAR64 0xfe00707f
+#define MATCH_SMAQA 0xc8000077
+#define MASK_SMAQA 0xfe00707f
+#define MATCH_SMAQA_SU 0xca000077
+#define MASK_SMAQA_SU 0xfe00707f
+#define MATCH_SMAX8 0x8a000077
+#define MASK_SMAX8 0xfe00707f
+#define MATCH_SMAX16 0x82000077
+#define MASK_SMAX16 0xfe00707f
+#define MATCH_SMBB16 0x8001077
+#define MASK_SMBB16 0xfe00707f
+#define MATCH_SMBT16 0x18001077
+#define MASK_SMBT16 0xfe00707f
+#define MATCH_SMTT16 0x28001077
+#define MASK_SMTT16 0xfe00707f
+#define MATCH_SMDS 0x58001077
+#define MASK_SMDS 0xfe00707f
+#define MATCH_SMDRS 0x68001077
+#define MASK_SMDRS 0xfe00707f
+#define MATCH_SMXDS 0x78001077
+#define MASK_SMXDS 0xfe00707f
+#define MATCH_SMIN8 0x88000077
+#define MASK_SMIN8 0xfe00707f
+#define MATCH_SMIN16 0x80000077
+#define MASK_SMIN16 0xfe00707f
+#define MATCH_SMMUL 0x40001077
+#define MASK_SMMUL 0xfe00707f
+#define MATCH_SMMUL_U 0x50001077
+#define MASK_SMMUL_U 0xfe00707f
+#define MATCH_SMMWB 0x44001077
+#define MASK_SMMWB 0xfe00707f
+#define MATCH_SMMWB_U 0x54001077
+#define MASK_SMMWB_U 0xfe00707f
+#define MATCH_SMMWT 0x64001077
+#define MASK_SMMWT 0xfe00707f
+#define MATCH_SMMWT_U 0x74001077
+#define MASK_SMMWT_U 0xfe00707f
+#define MATCH_SMSLDA 0xac001077
+#define MASK_SMSLDA 0xfe00707f
+#define MATCH_SMSLXDA 0xbc001077
+#define MASK_SMSLXDA 0xfe00707f
+#define MATCH_SMSR64 0x86001077
+#define MASK_SMSR64 0xfe00707f
+#define MATCH_SMUL8 0xa8000077
+#define MASK_SMUL8 0xfe00707f
+#define MATCH_SMULX8 0xaa000077
+#define MASK_SMULX8 0xfe00707f
+#define MATCH_SMUL16 0xa0000077
+#define MASK_SMUL16 0xfe00707f
+#define MATCH_SMULX16 0xa2000077
+#define MASK_SMULX16 0xfe00707f
+#define MATCH_SRA_U 0x24001077
+#define MASK_SRA_U 0xfe00707f
+#define MATCH_SRAI_U 0xd4001077
+#define MASK_SRAI_U 0xfc00707f
+#define MATCH_SRA8 0x58000077
+#define MASK_SRA8 0xfe00707f
+#define MATCH_SRA8_U 0x68000077
+#define MASK_SRA8_U 0xfe00707f
+#define MATCH_SRAI8 0x78000077
+#define MASK_SRAI8 0xff80707f
+#define MATCH_SRAI8_U 0x78800077
+#define MASK_SRAI8_U 0xff80707f
+#define MATCH_SRA16 0x50000077
+#define MASK_SRA16 0xfe00707f
+#define MATCH_SRA16_U 0x60000077
+#define MASK_SRA16_U 0xfe00707f
+#define MATCH_SRAI16 0x70000077
+#define MASK_SRAI16 0xff00707f
+#define MATCH_SRAI16_U 0x71000077
+#define MASK_SRAI16_U 0xff00707f
+#define MATCH_SRL8 0x5a000077
+#define MASK_SRL8 0xfe00707f
+#define MATCH_SRL8_U 0x6a000077
+#define MASK_SRL8_U 0xfe00707f
+#define MATCH_SRLI8 0x7a000077
+#define MASK_SRLI8 0xff80707f
+#define MATCH_SRLI8_U 0x7a800077
+#define MASK_SRLI8_U 0xff80707f
+#define MATCH_SRL16 0x52000077
+#define MASK_SRL16 0xfe00707f
+#define MATCH_SRL16_U 0x62000077
+#define MASK_SRL16_U 0xfe00707f
+#define MATCH_SRLI16 0x72000077
+#define MASK_SRLI16 0xff00707f
+#define MATCH_SRLI16_U 0x73000077
+#define MASK_SRLI16_U 0xff00707f
+#define MATCH_STAS16 0xf4002077
+#define MASK_STAS16 0xfe00707f
+#define MATCH_STSA16 0xf6002077
+#define MASK_STSA16 0xfe00707f
+#define MATCH_SUB8 0x4a000077
+#define MASK_SUB8 0xfe00707f
+#define MATCH_SUB16 0x42000077
+#define MASK_SUB16 0xfe00707f
+#define MATCH_SUB64 0xc2001077
+#define MASK_SUB64 0xfe00707f
+#define MATCH_SUNPKD810 0xac800077
+#define MASK_SUNPKD810 0xfff0707f
+#define MATCH_SUNPKD820 0xac900077
+#define MASK_SUNPKD820 0xfff0707f
+#define MATCH_SUNPKD830 0xaca00077
+#define MASK_SUNPKD830 0xfff0707f
+#define MATCH_SUNPKD831 0xacb00077
+#define MASK_SUNPKD831 0xfff0707f
+#define MATCH_SUNPKD832 0xad300077
+#define MASK_SUNPKD832 0xfff0707f
+#define MATCH_SWAP8 0xad800077
+#define MASK_SWAP8 0xfff0707f
+#define MATCH_UCLIP8 0x8d000077
+#define MASK_UCLIP8 0xff80707f
+#define MATCH_UCLIP16 0x85000077
+#define MASK_UCLIP16 0xff00707f
+#define MATCH_UCLIP32 0xf4000077
+#define MASK_UCLIP32 0xfe00707f
+#define MATCH_UCMPLE8 0x3e000077
+#define MASK_UCMPLE8 0xfe00707f
+#define MATCH_UCMPLE16 0x3c000077
+#define MASK_UCMPLE16 0xfe00707f
+#define MATCH_UCMPLT8 0x2e000077
+#define MASK_UCMPLT8 0xfe00707f
+#define MATCH_UCMPLT16 0x2c000077
+#define MASK_UCMPLT16 0xfe00707f
+#define MATCH_UKADD8 0x38000077
+#define MASK_UKADD8 0xfe00707f
+#define MATCH_UKADD16 0x30000077
+#define MASK_UKADD16 0xfe00707f
+#define MATCH_UKADD64 0xb0001077
+#define MASK_UKADD64 0xfe00707f
+#define MATCH_UKADDH 0x14001077
+#define MASK_UKADDH 0xfe00707f
+#define MATCH_UKADDW 0x10001077
+#define MASK_UKADDW 0xfe00707f
+#define MATCH_UKCRAS16 0x34000077
+#define MASK_UKCRAS16 0xfe00707f
+#define MATCH_UKCRSA16 0x36000077
+#define MASK_UKCRSA16 0xfe00707f
+#define MATCH_UKMAR64 0xb4001077
+#define MASK_UKMAR64 0xfe00707f
+#define MATCH_UKMSR64 0xb6001077
+#define MASK_UKMSR64 0xfe00707f
+#define MATCH_UKSTAS16 0xe4002077
+#define MASK_UKSTAS16 0xfe00707f
+#define MATCH_UKSTSA16 0xe6002077
+#define MASK_UKSTSA16 0xfe00707f
+#define MATCH_UKSUB8 0x3a000077
+#define MASK_UKSUB8 0xfe00707f
+#define MATCH_UKSUB16 0x32000077
+#define MASK_UKSUB16 0xfe00707f
+#define MATCH_UKSUB64 0xb2001077
+#define MASK_UKSUB64 0xfe00707f
+#define MATCH_UKSUBH 0x16001077
+#define MASK_UKSUBH 0xfe00707f
+#define MATCH_UKSUBW 0x12001077
+#define MASK_UKSUBW 0xfe00707f
+#define MATCH_UMAR64 0xa4001077
+#define MASK_UMAR64 0xfe00707f
+#define MATCH_UMAQA 0xcc000077
+#define MASK_UMAQA 0xfe00707f
+#define MATCH_UMAX8 0x9a000077
+#define MASK_UMAX8 0xfe00707f
+#define MATCH_UMAX16 0x92000077
+#define MASK_UMAX16 0xfe00707f
+#define MATCH_UMIN8 0x98000077
+#define MASK_UMIN8 0xfe00707f
+#define MATCH_UMIN16 0x90000077
+#define MASK_UMIN16 0xfe00707f
+#define MATCH_UMSR64 0xa6001077
+#define MASK_UMSR64 0xfe00707f
+#define MATCH_UMUL8 0xb8000077
+#define MASK_UMUL8 0xfe00707f
+#define MATCH_UMULX8 0xba000077
+#define MASK_UMULX8 0xfe00707f
+#define MATCH_UMUL16 0xb0000077
+#define MASK_UMUL16 0xfe00707f
+#define MATCH_UMULX16 0xb2000077
+#define MASK_UMULX16 0xfe00707f
+#define MATCH_URADD8 0x28000077
+#define MASK_URADD8 0xfe00707f
+#define MATCH_URADD16 0x20000077
+#define MASK_URADD16 0xfe00707f
+#define MATCH_URADD64 0xa0001077
+#define MASK_URADD64 0xfe00707f
+#define MATCH_URADDW 0x30001077
+#define MASK_URADDW 0xfe00707f
+#define MATCH_URCRAS16 0x24000077
+#define MASK_URCRAS16 0xfe00707f
+#define MATCH_URCRSA16 0x26000077
+#define MASK_URCRSA16 0xfe00707f
+#define MATCH_URSTAS16 0xd4002077
+#define MASK_URSTAS16 0xfe00707f
+#define MATCH_URSTSA16 0xd6002077
+#define MASK_URSTSA16 0xfe00707f
+#define MATCH_URSUB8 0x2a000077
+#define MASK_URSUB8 0xfe00707f
+#define MATCH_URSUB16 0x22000077
+#define MASK_URSUB16 0xfe00707f
+#define MATCH_URSUB64 0xa2001077
+#define MASK_URSUB64 0xfe00707f
+#define MATCH_URSUBW 0x32001077
+#define MASK_URSUBW 0xfe00707f
+#define MATCH_WEXTI 0xde000077
+#define MASK_WEXTI 0xfe00707f
+#define MATCH_WEXT 0xce000077
+#define MASK_WEXT 0xfe00707f
+#define MATCH_ZUNPKD810 0xacc00077
+#define MASK_ZUNPKD810 0xfff0707f
+#define MATCH_ZUNPKD820 0xacd00077
+#define MASK_ZUNPKD820 0xfff0707f
+#define MATCH_ZUNPKD830 0xace00077
+#define MASK_ZUNPKD830 0xfff0707f
+#define MATCH_ZUNPKD831 0xacf00077
+#define MASK_ZUNPKD831 0xfff0707f
+#define MATCH_ZUNPKD832 0xad700077
+#define MASK_ZUNPKD832 0xfff0707f
+#define MATCH_ADD32 0x40002077
+#define MASK_ADD32 0xfe00707f
+#define MATCH_CRAS32 0x44002077
+#define MASK_CRAS32 0xfe00707f
+#define MATCH_CRSA32 0x46002077
+#define MASK_CRSA32 0xfe00707f
+#define MATCH_KABS32 0xad200077
+#define MASK_KABS32 0xfff0707f
+#define MATCH_KADD32 0x10002077
+#define MASK_KADD32 0xfe00707f
+#define MATCH_KCRAS32 0x14002077
+#define MASK_KCRAS32 0xfe00707f
+#define MATCH_KCRSA32 0x16002077
+#define MASK_KCRSA32 0xfe00707f
+#define MATCH_KDMBB16 0xda001077
+#define MASK_KDMBB16 0xfe00707f
+#define MATCH_KDMBT16 0xea001077
+#define MASK_KDMBT16 0xfe00707f
+#define MATCH_KDMTT16 0xfa001077
+#define MASK_KDMTT16 0xfe00707f
+#define MATCH_KDMABB16 0xd8001077
+#define MASK_KDMABB16 0xfe00707f
+#define MATCH_KDMABT16 0xe8001077
+#define MASK_KDMABT16 0xfe00707f
+#define MATCH_KDMATT16 0xf8001077
+#define MASK_KDMATT16 0xfe00707f
+#define MATCH_KHMBB16 0xdc001077
+#define MASK_KHMBB16 0xfe00707f
+#define MATCH_KHMBT16 0xec001077
+#define MASK_KHMBT16 0xfe00707f
+#define MATCH_KHMTT16 0xfc001077
+#define MASK_KHMTT16 0xfe00707f
+#define MATCH_KMABB32 0x5a002077
+#define MASK_KMABB32 0xfe00707f
+#define MATCH_KMABT32 0x6a002077
+#define MASK_KMABT32 0xfe00707f
+#define MATCH_KMATT32 0x7a002077
+#define MASK_KMATT32 0xfe00707f
+#define MATCH_KMAXDA32 0x4a002077
+#define MASK_KMAXDA32 0xfe00707f
+#define MATCH_KMDA32 0x38002077
+#define MASK_KMDA32 0xfe00707f
+#define MATCH_KMXDA32 0x3a002077
+#define MASK_KMXDA32 0xfe00707f
+#define MATCH_KMADS32 0x5c002077
+#define MASK_KMADS32 0xfe00707f
+#define MATCH_KMADRS32 0x6c002077
+#define MASK_KMADRS32 0xfe00707f
+#define MATCH_KMAXDS32 0x7c002077
+#define MASK_KMAXDS32 0xfe00707f
+#define MATCH_KMSDA32 0x4c002077
+#define MASK_KMSDA32 0xfe00707f
+#define MATCH_KMSXDA32 0x4e002077
+#define MASK_KMSXDA32 0xfe00707f
+#define MATCH_KSLL32 0x64002077
+#define MASK_KSLL32 0xfe00707f
+#define MATCH_KSLLI32 0x84002077
+#define MASK_KSLLI32 0xfe00707f
+#define MATCH_KSLRA32 0x56002077
+#define MASK_KSLRA32 0xfe00707f
+#define MATCH_KSLRA32_U 0x66002077
+#define MASK_KSLRA32_U 0xfe00707f
+#define MATCH_KSTAS32 0xc0002077
+#define MASK_KSTAS32 0xfe00707f
+#define MATCH_KSTSA32 0xc2002077
+#define MASK_KSTSA32 0xfe00707f
+#define MATCH_KSUB32 0x12002077
+#define MASK_KSUB32 0xfe00707f
+#define MATCH_PKBB32 0xe002077
+#define MASK_PKBB32 0xfe00707f
+#define MATCH_PKBT32 0x1e002077
+#define MASK_PKBT32 0xfe00707f
+#define MATCH_PKTT32 0x2e002077
+#define MASK_PKTT32 0xfe00707f
+#define MATCH_PKTB32 0x3e002077
+#define MASK_PKTB32 0xfe00707f
+#define MATCH_RADD32 0x2077
+#define MASK_RADD32 0xfe00707f
+#define MATCH_RCRAS32 0x4002077
+#define MASK_RCRAS32 0xfe00707f
+#define MATCH_RCRSA32 0x6002077
+#define MASK_RCRSA32 0xfe00707f
+#define MATCH_RSTAS32 0xb0002077
+#define MASK_RSTAS32 0xfe00707f
+#define MATCH_RSTSA32 0xb2002077
+#define MASK_RSTSA32 0xfe00707f
+#define MATCH_RSUB32 0x2002077
+#define MASK_RSUB32 0xfe00707f
+#define MATCH_SLL32 0x54002077
+#define MASK_SLL32 0xfe00707f
+#define MATCH_SLLI32 0x74002077
+#define MASK_SLLI32 0xfe00707f
+#define MATCH_SMAX32 0x92002077
+#define MASK_SMAX32 0xfe00707f
+#define MATCH_SMBT32 0x18002077
+#define MASK_SMBT32 0xfe00707f
+#define MATCH_SMTT32 0x28002077
+#define MASK_SMTT32 0xfe00707f
+#define MATCH_SMDS32 0x58002077
+#define MASK_SMDS32 0xfe00707f
+#define MATCH_SMDRS32 0x68002077
+#define MASK_SMDRS32 0xfe00707f
+#define MATCH_SMXDS32 0x78002077
+#define MASK_SMXDS32 0xfe00707f
+#define MATCH_SMIN32 0x90002077
+#define MASK_SMIN32 0xfe00707f
+#define MATCH_SRA32 0x50002077
+#define MASK_SRA32 0xfe00707f
+#define MATCH_SRA32_U 0x60002077
+#define MASK_SRA32_U 0xfe00707f
+#define MATCH_SRAI32 0x70002077
+#define MASK_SRAI32 0xfe00707f
+#define MATCH_SRAI32_U 0x80002077
+#define MASK_SRAI32_U 0xfe00707f
+#define MATCH_SRAIW_U 0x34001077
+#define MASK_SRAIW_U 0xfe00707f
+#define MATCH_SRL32 0x52002077
+#define MASK_SRL32 0xfe00707f
+#define MATCH_SRL32_U 0x62002077
+#define MASK_SRL32_U 0xfe00707f
+#define MATCH_SRLI32 0x72002077
+#define MASK_SRLI32 0xfe00707f
+#define MATCH_SRLI32_U 0x82002077
+#define MASK_SRLI32_U 0xfe00707f
+#define MATCH_STAS32 0xf0002077
+#define MASK_STAS32 0xfe00707f
+#define MATCH_STSA32 0xf2002077
+#define MASK_STSA32 0xfe00707f
+#define MATCH_SUB32 0x42002077
+#define MASK_SUB32 0xfe00707f
+#define MATCH_UKADD32 0x30002077
+#define MASK_UKADD32 0xfe00707f
+#define MATCH_UKCRAS32 0x34002077
+#define MASK_UKCRAS32 0xfe00707f
+#define MATCH_UKCRSA32 0x36002077
+#define MASK_UKCRSA32 0xfe00707f
+#define MATCH_UKSTAS32 0xe0002077
+#define MASK_UKSTAS32 0xfe00707f
+#define MATCH_UKSTSA32 0xe2002077
+#define MASK_UKSTSA32 0xfe00707f
+#define MATCH_UKSUB32 0x32002077
+#define MASK_UKSUB32 0xfe00707f
+#define MATCH_UMAX32 0xa2002077
+#define MASK_UMAX32 0xfe00707f
+#define MATCH_UMIN32 0xa0002077
+#define MASK_UMIN32 0xfe00707f
+#define MATCH_URADD32 0x20002077
+#define MASK_URADD32 0xfe00707f
+#define MATCH_URCRAS32 0x24002077
+#define MASK_URCRAS32 0xfe00707f
+#define MATCH_URCRSA32 0x26002077
+#define MASK_URCRSA32 0xfe00707f
+#define MATCH_URSTAS32 0xd0002077
+#define MASK_URSTAS32 0xfe00707f
+#define MATCH_URSTSA32 0xd2002077
+#define MASK_URSTSA32 0xfe00707f
+#define MATCH_URSUB32 0x22002077
+#define MASK_URSUB32 0xfe00707f
+#define MATCH_VMVNFR_V 0x9e003057
+#define MASK_VMVNFR_V 0xfe00707f
+#define MATCH_VL1R_V 0x2800007
+#define MASK_VL1R_V 0xfff0707f
+#define MATCH_VL2R_V 0x6805007
+#define MASK_VL2R_V 0xfff0707f
+#define MATCH_VL4R_V 0xe806007
+#define MASK_VL4R_V 0xfff0707f
+#define MATCH_VL8R_V 0x1e807007
+#define MASK_VL8R_V 0xfff0707f
+#define MATCH_VLE1_V 0x2b00007
+#define MASK_VLE1_V 0xfff0707f
+#define MATCH_VSE1_V 0x2b00027
+#define MASK_VSE1_V 0xfff0707f
+#define MATCH_VFREDSUM_VS 0x4001057
+#define MASK_VFREDSUM_VS 0xfc00707f
+#define MATCH_VFWREDSUM_VS 0xc4001057
+#define MASK_VFWREDSUM_VS 0xfc00707f
+#define MATCH_VPOPC_M 0x40082057
+#define MASK_VPOPC_M 0xfc0ff07f
+#define MATCH_VMORNOT_MM 0x70002057
+#define MASK_VMORNOT_MM 0xfc00707f
+#define MATCH_VMANDNOT_MM 0x60002057
+#define MASK_VMANDNOT_MM 0xfc00707f
+#define CSR_FFLAGS 0x1
+#define CSR_FRM 0x2
+#define CSR_FCSR 0x3
+#define CSR_VSTART 0x8
+#define CSR_VXSAT 0x9
+#define CSR_VXRM 0xa
+#define CSR_VCSR 0xf
+#define CSR_SEED 0x15
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_VL 0xc20
+#define CSR_VTYPE 0xc21
+#define CSR_VLENB 0xc22
+#define CSR_SSTATUS 0x100
+#define CSR_SEDELEG 0x102
+#define CSR_SIDELEG 0x103
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SENVCFG 0x10a
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+#define CSR_SCONTEXT 0x5a8
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+#define CSR_HSTATUS 0x600
+#define CSR_HEDELEG 0x602
+#define CSR_HIDELEG 0x603
+#define CSR_HIE 0x604
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HGEIE 0x607
+#define CSR_HENVCFG 0x60a
+#define CSR_HTVAL 0x643
+#define CSR_HIP 0x644
+#define CSR_HVIP 0x645
+#define CSR_HTINST 0x64a
+#define CSR_HGATP 0x680
+#define CSR_HCONTEXT 0x6a8
+#define CSR_HGEIP 0xe12
+#define CSR_UTVT 0x7
+#define CSR_UNXTI 0x45
+#define CSR_UINTSTATUS 0x46
+#define CSR_USCRATCHCSW 0x48
+#define CSR_USCRATCHCSWL 0x49
+#define CSR_STVT 0x107
+#define CSR_SNXTI 0x145
+#define CSR_SINTSTATUS 0x146
+#define CSR_SSCRATCHCSW 0x148
+#define CSR_SSCRATCHCSWL 0x149
+#define CSR_MTVT 0x307
+#define CSR_MNXTI 0x345
+#define CSR_MINTSTATUS 0x346
+#define CSR_MSCRATCHCSW 0x348
+#define CSR_MSCRATCHCSWL 0x349
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MEDELEG 0x302
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MCOUNTEREN 0x306
+#define CSR_MENVCFG 0x30a
+#define CSR_MCOUNTINHIBIT 0x320
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_MTINST 0x34a
+#define CSR_MTVAL2 0x34b
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPCFG1 0x3a1
+#define CSR_PMPCFG2 0x3a2
+#define CSR_PMPCFG3 0x3a3
+#define CSR_PMPCFG4 0x3a4
+#define CSR_PMPCFG5 0x3a5
+#define CSR_PMPCFG6 0x3a6
+#define CSR_PMPCFG7 0x3a7
+#define CSR_PMPCFG8 0x3a8
+#define CSR_PMPCFG9 0x3a9
+#define CSR_PMPCFG10 0x3aa
+#define CSR_PMPCFG11 0x3ab
+#define CSR_PMPCFG12 0x3ac
+#define CSR_PMPCFG13 0x3ad
+#define CSR_PMPCFG14 0x3ae
+#define CSR_PMPCFG15 0x3af
+#define CSR_PMPADDR0 0x3b0
+#define CSR_PMPADDR1 0x3b1
+#define CSR_PMPADDR2 0x3b2
+#define CSR_PMPADDR3 0x3b3
+#define CSR_PMPADDR4 0x3b4
+#define CSR_PMPADDR5 0x3b5
+#define CSR_PMPADDR6 0x3b6
+#define CSR_PMPADDR7 0x3b7
+#define CSR_PMPADDR8 0x3b8
+#define CSR_PMPADDR9 0x3b9
+#define CSR_PMPADDR10 0x3ba
+#define CSR_PMPADDR11 0x3bb
+#define CSR_PMPADDR12 0x3bc
+#define CSR_PMPADDR13 0x3bd
+#define CSR_PMPADDR14 0x3be
+#define CSR_PMPADDR15 0x3bf
+#define CSR_PMPADDR16 0x3c0
+#define CSR_PMPADDR17 0x3c1
+#define CSR_PMPADDR18 0x3c2
+#define CSR_PMPADDR19 0x3c3
+#define CSR_PMPADDR20 0x3c4
+#define CSR_PMPADDR21 0x3c5
+#define CSR_PMPADDR22 0x3c6
+#define CSR_PMPADDR23 0x3c7
+#define CSR_PMPADDR24 0x3c8
+#define CSR_PMPADDR25 0x3c9
+#define CSR_PMPADDR26 0x3ca
+#define CSR_PMPADDR27 0x3cb
+#define CSR_PMPADDR28 0x3cc
+#define CSR_PMPADDR29 0x3cd
+#define CSR_PMPADDR30 0x3ce
+#define CSR_PMPADDR31 0x3cf
+#define CSR_PMPADDR32 0x3d0
+#define CSR_PMPADDR33 0x3d1
+#define CSR_PMPADDR34 0x3d2
+#define CSR_PMPADDR35 0x3d3
+#define CSR_PMPADDR36 0x3d4
+#define CSR_PMPADDR37 0x3d5
+#define CSR_PMPADDR38 0x3d6
+#define CSR_PMPADDR39 0x3d7
+#define CSR_PMPADDR40 0x3d8
+#define CSR_PMPADDR41 0x3d9
+#define CSR_PMPADDR42 0x3da
+#define CSR_PMPADDR43 0x3db
+#define CSR_PMPADDR44 0x3dc
+#define CSR_PMPADDR45 0x3dd
+#define CSR_PMPADDR46 0x3de
+#define CSR_PMPADDR47 0x3df
+#define CSR_PMPADDR48 0x3e0
+#define CSR_PMPADDR49 0x3e1
+#define CSR_PMPADDR50 0x3e2
+#define CSR_PMPADDR51 0x3e3
+#define CSR_PMPADDR52 0x3e4
+#define CSR_PMPADDR53 0x3e5
+#define CSR_PMPADDR54 0x3e6
+#define CSR_PMPADDR55 0x3e7
+#define CSR_PMPADDR56 0x3e8
+#define CSR_PMPADDR57 0x3e9
+#define CSR_PMPADDR58 0x3ea
+#define CSR_PMPADDR59 0x3eb
+#define CSR_PMPADDR60 0x3ec
+#define CSR_PMPADDR61 0x3ed
+#define CSR_PMPADDR62 0x3ee
+#define CSR_PMPADDR63 0x3ef
+#define CSR_MSECCFG 0x747
+#define CSR_TSELECT 0x7a0
+#define CSR_TDATA1 0x7a1
+#define CSR_TDATA2 0x7a2
+#define CSR_TDATA3 0x7a3
+#define CSR_TINFO 0x7a4
+#define CSR_TCONTROL 0x7a5
+#define CSR_MCONTEXT 0x7a8
+#define CSR_MSCONTEXT 0x7aa
+#define CSR_DCSR 0x7b0
+#define CSR_DPC 0x7b1
+#define CSR_DSCRATCH0 0x7b2
+#define CSR_DSCRATCH1 0x7b3
+#define CSR_MCYCLE 0xb00
+#define CSR_MINSTRET 0xb02
+#define CSR_MHPMCOUNTER3 0xb03
+#define CSR_MHPMCOUNTER4 0xb04
+#define CSR_MHPMCOUNTER5 0xb05
+#define CSR_MHPMCOUNTER6 0xb06
+#define CSR_MHPMCOUNTER7 0xb07
+#define CSR_MHPMCOUNTER8 0xb08
+#define CSR_MHPMCOUNTER9 0xb09
+#define CSR_MHPMCOUNTER10 0xb0a
+#define CSR_MHPMCOUNTER11 0xb0b
+#define CSR_MHPMCOUNTER12 0xb0c
+#define CSR_MHPMCOUNTER13 0xb0d
+#define CSR_MHPMCOUNTER14 0xb0e
+#define CSR_MHPMCOUNTER15 0xb0f
+#define CSR_MHPMCOUNTER16 0xb10
+#define CSR_MHPMCOUNTER17 0xb11
+#define CSR_MHPMCOUNTER18 0xb12
+#define CSR_MHPMCOUNTER19 0xb13
+#define CSR_MHPMCOUNTER20 0xb14
+#define CSR_MHPMCOUNTER21 0xb15
+#define CSR_MHPMCOUNTER22 0xb16
+#define CSR_MHPMCOUNTER23 0xb17
+#define CSR_MHPMCOUNTER24 0xb18
+#define CSR_MHPMCOUNTER25 0xb19
+#define CSR_MHPMCOUNTER26 0xb1a
+#define CSR_MHPMCOUNTER27 0xb1b
+#define CSR_MHPMCOUNTER28 0xb1c
+#define CSR_MHPMCOUNTER29 0xb1d
+#define CSR_MHPMCOUNTER30 0xb1e
+#define CSR_MHPMCOUNTER31 0xb1f
+#define CSR_MHPMEVENT3 0x323
+#define CSR_MHPMEVENT4 0x324
+#define CSR_MHPMEVENT5 0x325
+#define CSR_MHPMEVENT6 0x326
+#define CSR_MHPMEVENT7 0x327
+#define CSR_MHPMEVENT8 0x328
+#define CSR_MHPMEVENT9 0x329
+#define CSR_MHPMEVENT10 0x32a
+#define CSR_MHPMEVENT11 0x32b
+#define CSR_MHPMEVENT12 0x32c
+#define CSR_MHPMEVENT13 0x32d
+#define CSR_MHPMEVENT14 0x32e
+#define CSR_MHPMEVENT15 0x32f
+#define CSR_MHPMEVENT16 0x330
+#define CSR_MHPMEVENT17 0x331
+#define CSR_MHPMEVENT18 0x332
+#define CSR_MHPMEVENT19 0x333
+#define CSR_MHPMEVENT20 0x334
+#define CSR_MHPMEVENT21 0x335
+#define CSR_MHPMEVENT22 0x336
+#define CSR_MHPMEVENT23 0x337
+#define CSR_MHPMEVENT24 0x338
+#define CSR_MHPMEVENT25 0x339
+#define CSR_MHPMEVENT26 0x33a
+#define CSR_MHPMEVENT27 0x33b
+#define CSR_MHPMEVENT28 0x33c
+#define CSR_MHPMEVENT29 0x33d
+#define CSR_MHPMEVENT30 0x33e
+#define CSR_MHPMEVENT31 0x33f
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+#define CSR_MCONFIGPTR 0xf15
+#define CSR_HTIMEDELTAH 0x615
+#define CSR_HENVCFGH 0x61a
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+#define CSR_MSTATUSH 0x310
+#define CSR_MENVCFGH 0x31a
+#define CSR_MSECCFGH 0x757
+#define CSR_MCYCLEH 0xb80
+#define CSR_MINSTRETH 0xb82
+#define CSR_MHPMCOUNTER3H 0xb83
+#define CSR_MHPMCOUNTER4H 0xb84
+#define CSR_MHPMCOUNTER5H 0xb85
+#define CSR_MHPMCOUNTER6H 0xb86
+#define CSR_MHPMCOUNTER7H 0xb87
+#define CSR_MHPMCOUNTER8H 0xb88
+#define CSR_MHPMCOUNTER9H 0xb89
+#define CSR_MHPMCOUNTER10H 0xb8a
+#define CSR_MHPMCOUNTER11H 0xb8b
+#define CSR_MHPMCOUNTER12H 0xb8c
+#define CSR_MHPMCOUNTER13H 0xb8d
+#define CSR_MHPMCOUNTER14H 0xb8e
+#define CSR_MHPMCOUNTER15H 0xb8f
+#define CSR_MHPMCOUNTER16H 0xb90
+#define CSR_MHPMCOUNTER17H 0xb91
+#define CSR_MHPMCOUNTER18H 0xb92
+#define CSR_MHPMCOUNTER19H 0xb93
+#define CSR_MHPMCOUNTER20H 0xb94
+#define CSR_MHPMCOUNTER21H 0xb95
+#define CSR_MHPMCOUNTER22H 0xb96
+#define CSR_MHPMCOUNTER23H 0xb97
+#define CSR_MHPMCOUNTER24H 0xb98
+#define CSR_MHPMCOUNTER25H 0xb99
+#define CSR_MHPMCOUNTER26H 0xb9a
+#define CSR_MHPMCOUNTER27H 0xb9b
+#define CSR_MHPMCOUNTER28H 0xb9c
+#define CSR_MHPMCOUNTER29H 0xb9d
+#define CSR_MHPMCOUNTER30H 0xb9e
+#define CSR_MHPMCOUNTER31H 0xb9f
+#define CAUSE_MISALIGNED_FETCH 0x0
+#define CAUSE_FETCH_ACCESS 0x1
+#define CAUSE_ILLEGAL_INSTRUCTION 0x2
+#define CAUSE_BREAKPOINT 0x3
+#define CAUSE_MISALIGNED_LOAD 0x4
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_MISALIGNED_STORE 0x6
+#define CAUSE_STORE_ACCESS 0x7
+#define CAUSE_USER_ECALL 0x8
+#define CAUSE_SUPERVISOR_ECALL 0x9
+#define CAUSE_VIRTUAL_SUPERVISOR_ECALL 0xa
+#define CAUSE_MACHINE_ECALL 0xb
+#define CAUSE_FETCH_PAGE_FAULT 0xc
+#define CAUSE_LOAD_PAGE_FAULT 0xd
+#define CAUSE_STORE_PAGE_FAULT 0xf
+#define CAUSE_FETCH_GUEST_PAGE_FAULT 0x14
+#define CAUSE_LOAD_GUEST_PAGE_FAULT 0x15
+#define CAUSE_VIRTUAL_INSTRUCTION 0x16
+#define CAUSE_STORE_GUEST_PAGE_FAULT 0x17
+#endif
+#ifdef DECLARE_INSN
+DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
+DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
+DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
+DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
+DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
+DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
+DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
+DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
+DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
+DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
+DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
+DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
+DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
+DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
+DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
+DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
+DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
+DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
+DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
+DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
+DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
+DECLARE_INSN(fence_tso, MATCH_FENCE_TSO, MASK_FENCE_TSO)
+DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
+DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
+DECLARE_INSN(or, MATCH_OR, MASK_OR)
+DECLARE_INSN(and, MATCH_AND, MASK_AND)
+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
+DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
+DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
+DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
+DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
+DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
+DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
+DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
+DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
+DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
+DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA)
+DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA)
+DECLARE_INSN(hlv_b, MATCH_HLV_B, MASK_HLV_B)
+DECLARE_INSN(hlv_bu, MATCH_HLV_BU, MASK_HLV_BU)
+DECLARE_INSN(hlv_h, MATCH_HLV_H, MASK_HLV_H)
+DECLARE_INSN(hlv_hu, MATCH_HLV_HU, MASK_HLV_HU)
+DECLARE_INSN(hlvx_hu, MATCH_HLVX_HU, MASK_HLVX_HU)
+DECLARE_INSN(hlv_w, MATCH_HLV_W, MASK_HLV_W)
+DECLARE_INSN(hlvx_wu, MATCH_HLVX_WU, MASK_HLVX_WU)
+DECLARE_INSN(hsv_b, MATCH_HSV_B, MASK_HSV_B)
+DECLARE_INSN(hsv_h, MATCH_HSV_H, MASK_HSV_H)
+DECLARE_INSN(hsv_w, MATCH_HSV_W, MASK_HSV_W)
+DECLARE_INSN(hlv_wu, MATCH_HLV_WU, MASK_HLV_WU)
+DECLARE_INSN(hlv_d, MATCH_HLV_D, MASK_HLV_D)
+DECLARE_INSN(hsv_d, MATCH_HSV_D, MASK_HSV_D)
+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
+DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W)
+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
+DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X)
+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
+DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q)
+DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q)
+DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q)
+DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q)
+DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q)
+DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q)
+DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q)
+DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q)
+DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q)
+DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q)
+DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S)
+DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q)
+DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D)
+DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q)
+DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q)
+DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q)
+DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q)
+DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q)
+DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q)
+DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q)
+DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W)
+DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU)
+DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ)
+DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ)
+DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q)
+DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q)
+DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q)
+DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q)
+DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q)
+DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q)
+DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
+DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
+DECLARE_INSN(andn, MATCH_ANDN, MASK_ANDN)
+DECLARE_INSN(orn, MATCH_ORN, MASK_ORN)
+DECLARE_INSN(xnor, MATCH_XNOR, MASK_XNOR)
+DECLARE_INSN(slo, MATCH_SLO, MASK_SLO)
+DECLARE_INSN(sro, MATCH_SRO, MASK_SRO)
+DECLARE_INSN(rol, MATCH_ROL, MASK_ROL)
+DECLARE_INSN(ror, MATCH_ROR, MASK_ROR)
+DECLARE_INSN(bclr, MATCH_BCLR, MASK_BCLR)
+DECLARE_INSN(bset, MATCH_BSET, MASK_BSET)
+DECLARE_INSN(binv, MATCH_BINV, MASK_BINV)
+DECLARE_INSN(bext, MATCH_BEXT, MASK_BEXT)
+DECLARE_INSN(gorc, MATCH_GORC, MASK_GORC)
+DECLARE_INSN(grev, MATCH_GREV, MASK_GREV)
+DECLARE_INSN(sloi, MATCH_SLOI, MASK_SLOI)
+DECLARE_INSN(sroi, MATCH_SROI, MASK_SROI)
+DECLARE_INSN(rori, MATCH_RORI, MASK_RORI)
+DECLARE_INSN(bclri, MATCH_BCLRI, MASK_BCLRI)
+DECLARE_INSN(bseti, MATCH_BSETI, MASK_BSETI)
+DECLARE_INSN(binvi, MATCH_BINVI, MASK_BINVI)
+DECLARE_INSN(bexti, MATCH_BEXTI, MASK_BEXTI)
+DECLARE_INSN(gorci, MATCH_GORCI, MASK_GORCI)
+DECLARE_INSN(grevi, MATCH_GREVI, MASK_GREVI)
+DECLARE_INSN(cmix, MATCH_CMIX, MASK_CMIX)
+DECLARE_INSN(cmov, MATCH_CMOV, MASK_CMOV)
+DECLARE_INSN(fsl, MATCH_FSL, MASK_FSL)
+DECLARE_INSN(fsr, MATCH_FSR, MASK_FSR)
+DECLARE_INSN(fsri, MATCH_FSRI, MASK_FSRI)
+DECLARE_INSN(clz, MATCH_CLZ, MASK_CLZ)
+DECLARE_INSN(ctz, MATCH_CTZ, MASK_CTZ)
+DECLARE_INSN(cpop, MATCH_CPOP, MASK_CPOP)
+DECLARE_INSN(sext_b, MATCH_SEXT_B, MASK_SEXT_B)
+DECLARE_INSN(sext_h, MATCH_SEXT_H, MASK_SEXT_H)
+DECLARE_INSN(crc32_b, MATCH_CRC32_B, MASK_CRC32_B)
+DECLARE_INSN(crc32_h, MATCH_CRC32_H, MASK_CRC32_H)
+DECLARE_INSN(crc32_w, MATCH_CRC32_W, MASK_CRC32_W)
+DECLARE_INSN(crc32c_b, MATCH_CRC32C_B, MASK_CRC32C_B)
+DECLARE_INSN(crc32c_h, MATCH_CRC32C_H, MASK_CRC32C_H)
+DECLARE_INSN(crc32c_w, MATCH_CRC32C_W, MASK_CRC32C_W)
+DECLARE_INSN(sh1add, MATCH_SH1ADD, MASK_SH1ADD)
+DECLARE_INSN(sh2add, MATCH_SH2ADD, MASK_SH2ADD)
+DECLARE_INSN(sh3add, MATCH_SH3ADD, MASK_SH3ADD)
+DECLARE_INSN(clmul, MATCH_CLMUL, MASK_CLMUL)
+DECLARE_INSN(clmulr, MATCH_CLMULR, MASK_CLMULR)
+DECLARE_INSN(clmulh, MATCH_CLMULH, MASK_CLMULH)
+DECLARE_INSN(min, MATCH_MIN, MASK_MIN)
+DECLARE_INSN(minu, MATCH_MINU, MASK_MINU)
+DECLARE_INSN(max, MATCH_MAX, MASK_MAX)
+DECLARE_INSN(maxu, MATCH_MAXU, MASK_MAXU)
+DECLARE_INSN(shfl, MATCH_SHFL, MASK_SHFL)
+DECLARE_INSN(unshfl, MATCH_UNSHFL, MASK_UNSHFL)
+DECLARE_INSN(bcompress, MATCH_BCOMPRESS, MASK_BCOMPRESS)
+DECLARE_INSN(bdecompress, MATCH_BDECOMPRESS, MASK_BDECOMPRESS)
+DECLARE_INSN(pack, MATCH_PACK, MASK_PACK)
+DECLARE_INSN(packu, MATCH_PACKU, MASK_PACKU)
+DECLARE_INSN(packh, MATCH_PACKH, MASK_PACKH)
+DECLARE_INSN(bfp, MATCH_BFP, MASK_BFP)
+DECLARE_INSN(shfli, MATCH_SHFLI, MASK_SHFLI)
+DECLARE_INSN(unshfli, MATCH_UNSHFLI, MASK_UNSHFLI)
+DECLARE_INSN(xperm4, MATCH_XPERM4, MASK_XPERM4)
+DECLARE_INSN(xperm8, MATCH_XPERM8, MASK_XPERM8)
+DECLARE_INSN(xperm16, MATCH_XPERM16, MASK_XPERM16)
+DECLARE_INSN(bmatflip, MATCH_BMATFLIP, MASK_BMATFLIP)
+DECLARE_INSN(crc32_d, MATCH_CRC32_D, MASK_CRC32_D)
+DECLARE_INSN(crc32c_d, MATCH_CRC32C_D, MASK_CRC32C_D)
+DECLARE_INSN(bmator, MATCH_BMATOR, MASK_BMATOR)
+DECLARE_INSN(bmatxor, MATCH_BMATXOR, MASK_BMATXOR)
+DECLARE_INSN(slli_uw, MATCH_SLLI_UW, MASK_SLLI_UW)
+DECLARE_INSN(add_uw, MATCH_ADD_UW, MASK_ADD_UW)
+DECLARE_INSN(slow, MATCH_SLOW, MASK_SLOW)
+DECLARE_INSN(srow, MATCH_SROW, MASK_SROW)
+DECLARE_INSN(rolw, MATCH_ROLW, MASK_ROLW)
+DECLARE_INSN(rorw, MATCH_RORW, MASK_RORW)
+DECLARE_INSN(gorcw, MATCH_GORCW, MASK_GORCW)
+DECLARE_INSN(grevw, MATCH_GREVW, MASK_GREVW)
+DECLARE_INSN(sloiw, MATCH_SLOIW, MASK_SLOIW)
+DECLARE_INSN(sroiw, MATCH_SROIW, MASK_SROIW)
+DECLARE_INSN(roriw, MATCH_RORIW, MASK_RORIW)
+DECLARE_INSN(gorciw, MATCH_GORCIW, MASK_GORCIW)
+DECLARE_INSN(greviw, MATCH_GREVIW, MASK_GREVIW)
+DECLARE_INSN(fslw, MATCH_FSLW, MASK_FSLW)
+DECLARE_INSN(fsrw, MATCH_FSRW, MASK_FSRW)
+DECLARE_INSN(fsriw, MATCH_FSRIW, MASK_FSRIW)
+DECLARE_INSN(clzw, MATCH_CLZW, MASK_CLZW)
+DECLARE_INSN(ctzw, MATCH_CTZW, MASK_CTZW)
+DECLARE_INSN(cpopw, MATCH_CPOPW, MASK_CPOPW)
+DECLARE_INSN(sh1add_uw, MATCH_SH1ADD_UW, MASK_SH1ADD_UW)
+DECLARE_INSN(sh2add_uw, MATCH_SH2ADD_UW, MASK_SH2ADD_UW)
+DECLARE_INSN(sh3add_uw, MATCH_SH3ADD_UW, MASK_SH3ADD_UW)
+DECLARE_INSN(shflw, MATCH_SHFLW, MASK_SHFLW)
+DECLARE_INSN(unshflw, MATCH_UNSHFLW, MASK_UNSHFLW)
+DECLARE_INSN(bcompressw, MATCH_BCOMPRESSW, MASK_BCOMPRESSW)
+DECLARE_INSN(bdecompressw, MATCH_BDECOMPRESSW, MASK_BDECOMPRESSW)
+DECLARE_INSN(packw, MATCH_PACKW, MASK_PACKW)
+DECLARE_INSN(packuw, MATCH_PACKUW, MASK_PACKUW)
+DECLARE_INSN(bfpw, MATCH_BFPW, MASK_BFPW)
+DECLARE_INSN(xperm32, MATCH_XPERM32, MASK_XPERM32)
+DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
+DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
+DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
+DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
+DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
+DECLARE_INSN(sinval_vma, MATCH_SINVAL_VMA, MASK_SINVAL_VMA)
+DECLARE_INSN(sfence_w_inval, MATCH_SFENCE_W_INVAL, MASK_SFENCE_W_INVAL)
+DECLARE_INSN(sfence_inval_ir, MATCH_SFENCE_INVAL_IR, MASK_SFENCE_INVAL_IR)
+DECLARE_INSN(hinval_vvma, MATCH_HINVAL_VVMA, MASK_HINVAL_VVMA)
+DECLARE_INSN(hinval_gvma, MATCH_HINVAL_GVMA, MASK_HINVAL_GVMA)
+DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
+DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
+DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
+DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
+DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
+DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
+DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
+DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
+DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
+DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
+DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
+DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
+DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
+DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
+DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
+DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
+DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
+DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
+DECLARE_INSN(fclass_h, MATCH_FCLASS_H, MASK_FCLASS_H)
+DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
+DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
+DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
+DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
+DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
+DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
+DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
+DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
+DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
+DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
+DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
+DECLARE_INSN(fcvt_h_q, MATCH_FCVT_H_Q, MASK_FCVT_H_Q)
+DECLARE_INSN(fcvt_q_h, MATCH_FCVT_Q_H, MASK_FCVT_Q_H)
+DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
+DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
+DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
+DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
+DECLARE_INSN(sm4ed, MATCH_SM4ED, MASK_SM4ED)
+DECLARE_INSN(sm4ks, MATCH_SM4KS, MASK_SM4KS)
+DECLARE_INSN(sm3p0, MATCH_SM3P0, MASK_SM3P0)
+DECLARE_INSN(sm3p1, MATCH_SM3P1, MASK_SM3P1)
+DECLARE_INSN(sha256sum0, MATCH_SHA256SUM0, MASK_SHA256SUM0)
+DECLARE_INSN(sha256sum1, MATCH_SHA256SUM1, MASK_SHA256SUM1)
+DECLARE_INSN(sha256sig0, MATCH_SHA256SIG0, MASK_SHA256SIG0)
+DECLARE_INSN(sha256sig1, MATCH_SHA256SIG1, MASK_SHA256SIG1)
+DECLARE_INSN(aes32esmi, MATCH_AES32ESMI, MASK_AES32ESMI)
+DECLARE_INSN(aes32esi, MATCH_AES32ESI, MASK_AES32ESI)
+DECLARE_INSN(aes32dsmi, MATCH_AES32DSMI, MASK_AES32DSMI)
+DECLARE_INSN(aes32dsi, MATCH_AES32DSI, MASK_AES32DSI)
+DECLARE_INSN(sha512sum0r, MATCH_SHA512SUM0R, MASK_SHA512SUM0R)
+DECLARE_INSN(sha512sum1r, MATCH_SHA512SUM1R, MASK_SHA512SUM1R)
+DECLARE_INSN(sha512sig0l, MATCH_SHA512SIG0L, MASK_SHA512SIG0L)
+DECLARE_INSN(sha512sig0h, MATCH_SHA512SIG0H, MASK_SHA512SIG0H)
+DECLARE_INSN(sha512sig1l, MATCH_SHA512SIG1L, MASK_SHA512SIG1L)
+DECLARE_INSN(sha512sig1h, MATCH_SHA512SIG1H, MASK_SHA512SIG1H)
+DECLARE_INSN(aes64ks1i, MATCH_AES64KS1I, MASK_AES64KS1I)
+DECLARE_INSN(aes64im, MATCH_AES64IM, MASK_AES64IM)
+DECLARE_INSN(aes64ks2, MATCH_AES64KS2, MASK_AES64KS2)
+DECLARE_INSN(aes64esm, MATCH_AES64ESM, MASK_AES64ESM)
+DECLARE_INSN(aes64es, MATCH_AES64ES, MASK_AES64ES)
+DECLARE_INSN(aes64dsm, MATCH_AES64DSM, MASK_AES64DSM)
+DECLARE_INSN(aes64ds, MATCH_AES64DS, MASK_AES64DS)
+DECLARE_INSN(sha512sum0, MATCH_SHA512SUM0, MASK_SHA512SUM0)
+DECLARE_INSN(sha512sum1, MATCH_SHA512SUM1, MASK_SHA512SUM1)
+DECLARE_INSN(sha512sig0, MATCH_SHA512SIG0, MASK_SHA512SIG0)
+DECLARE_INSN(sha512sig1, MATCH_SHA512SIG1, MASK_SHA512SIG1)
+DECLARE_INSN(cbo_clean, MATCH_CBO_CLEAN, MASK_CBO_CLEAN)
+DECLARE_INSN(cbo_flush, MATCH_CBO_FLUSH, MASK_CBO_FLUSH)
+DECLARE_INSN(cbo_inval, MATCH_CBO_INVAL, MASK_CBO_INVAL)
+DECLARE_INSN(cbo_zero, MATCH_CBO_ZERO, MASK_CBO_ZERO)
+DECLARE_INSN(prefetch_i, MATCH_PREFETCH_I, MASK_PREFETCH_I)
+DECLARE_INSN(prefetch_r, MATCH_PREFETCH_R, MASK_PREFETCH_R)
+DECLARE_INSN(prefetch_w, MATCH_PREFETCH_W, MASK_PREFETCH_W)
+DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
+DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
+DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
+DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
+DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
+DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
+DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
+DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
+DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
+DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
+DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
+DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
+DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
+DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
+DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
+DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
+DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
+DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
+DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
+DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
+DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
+DECLARE_INSN(c_srli_rv32, MATCH_C_SRLI_RV32, MASK_C_SRLI_RV32)
+DECLARE_INSN(c_srai_rv32, MATCH_C_SRAI_RV32, MASK_C_SRAI_RV32)
+DECLARE_INSN(c_slli_rv32, MATCH_C_SLLI_RV32, MASK_C_SLLI_RV32)
+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
+DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
+DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
+DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
+DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
+DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
+DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
+DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
+DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
+DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
+DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
+DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
+DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
+DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
+DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
+DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
+DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
+DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
+DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
+DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
+DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
+DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
+DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
+DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
+DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
+DECLARE_INSN(vsetivli, MATCH_VSETIVLI, MASK_VSETIVLI)
+DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI)
+DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
+DECLARE_INSN(vlm_v, MATCH_VLM_V, MASK_VLM_V)
+DECLARE_INSN(vsm_v, MATCH_VSM_V, MASK_VSM_V)
+DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V)
+DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V)
+DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V)
+DECLARE_INSN(vle64_v, MATCH_VLE64_V, MASK_VLE64_V)
+DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V)
+DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V)
+DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V)
+DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V)
+DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V)
+DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V)
+DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V)
+DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V)
+DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V)
+DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V)
+DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V)
+DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V)
+DECLARE_INSN(vluxei8_v, MATCH_VLUXEI8_V, MASK_VLUXEI8_V)
+DECLARE_INSN(vluxei16_v, MATCH_VLUXEI16_V, MASK_VLUXEI16_V)
+DECLARE_INSN(vluxei32_v, MATCH_VLUXEI32_V, MASK_VLUXEI32_V)
+DECLARE_INSN(vluxei64_v, MATCH_VLUXEI64_V, MASK_VLUXEI64_V)
+DECLARE_INSN(vluxei128_v, MATCH_VLUXEI128_V, MASK_VLUXEI128_V)
+DECLARE_INSN(vluxei256_v, MATCH_VLUXEI256_V, MASK_VLUXEI256_V)
+DECLARE_INSN(vluxei512_v, MATCH_VLUXEI512_V, MASK_VLUXEI512_V)
+DECLARE_INSN(vluxei1024_v, MATCH_VLUXEI1024_V, MASK_VLUXEI1024_V)
+DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V)
+DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V)
+DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V)
+DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V)
+DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V)
+DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V)
+DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V)
+DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V)
+DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V)
+DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V)
+DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V)
+DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V)
+DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V)
+DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V)
+DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V)
+DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V)
+DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V)
+DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V)
+DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V)
+DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V)
+DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V)
+DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V)
+DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V)
+DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V)
+DECLARE_INSN(vloxei8_v, MATCH_VLOXEI8_V, MASK_VLOXEI8_V)
+DECLARE_INSN(vloxei16_v, MATCH_VLOXEI16_V, MASK_VLOXEI16_V)
+DECLARE_INSN(vloxei32_v, MATCH_VLOXEI32_V, MASK_VLOXEI32_V)
+DECLARE_INSN(vloxei64_v, MATCH_VLOXEI64_V, MASK_VLOXEI64_V)
+DECLARE_INSN(vloxei128_v, MATCH_VLOXEI128_V, MASK_VLOXEI128_V)
+DECLARE_INSN(vloxei256_v, MATCH_VLOXEI256_V, MASK_VLOXEI256_V)
+DECLARE_INSN(vloxei512_v, MATCH_VLOXEI512_V, MASK_VLOXEI512_V)
+DECLARE_INSN(vloxei1024_v, MATCH_VLOXEI1024_V, MASK_VLOXEI1024_V)
+DECLARE_INSN(vsoxei8_v, MATCH_VSOXEI8_V, MASK_VSOXEI8_V)
+DECLARE_INSN(vsoxei16_v, MATCH_VSOXEI16_V, MASK_VSOXEI16_V)
+DECLARE_INSN(vsoxei32_v, MATCH_VSOXEI32_V, MASK_VSOXEI32_V)
+DECLARE_INSN(vsoxei64_v, MATCH_VSOXEI64_V, MASK_VSOXEI64_V)
+DECLARE_INSN(vsoxei128_v, MATCH_VSOXEI128_V, MASK_VSOXEI128_V)
+DECLARE_INSN(vsoxei256_v, MATCH_VSOXEI256_V, MASK_VSOXEI256_V)
+DECLARE_INSN(vsoxei512_v, MATCH_VSOXEI512_V, MASK_VSOXEI512_V)
+DECLARE_INSN(vsoxei1024_v, MATCH_VSOXEI1024_V, MASK_VSOXEI1024_V)
+DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, MASK_VLE8FF_V)
+DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V)
+DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V)
+DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V)
+DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V)
+DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V)
+DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V)
+DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V)
+DECLARE_INSN(vl1re8_v, MATCH_VL1RE8_V, MASK_VL1RE8_V)
+DECLARE_INSN(vl1re16_v, MATCH_VL1RE16_V, MASK_VL1RE16_V)
+DECLARE_INSN(vl1re32_v, MATCH_VL1RE32_V, MASK_VL1RE32_V)
+DECLARE_INSN(vl1re64_v, MATCH_VL1RE64_V, MASK_VL1RE64_V)
+DECLARE_INSN(vl2re8_v, MATCH_VL2RE8_V, MASK_VL2RE8_V)
+DECLARE_INSN(vl2re16_v, MATCH_VL2RE16_V, MASK_VL2RE16_V)
+DECLARE_INSN(vl2re32_v, MATCH_VL2RE32_V, MASK_VL2RE32_V)
+DECLARE_INSN(vl2re64_v, MATCH_VL2RE64_V, MASK_VL2RE64_V)
+DECLARE_INSN(vl4re8_v, MATCH_VL4RE8_V, MASK_VL4RE8_V)
+DECLARE_INSN(vl4re16_v, MATCH_VL4RE16_V, MASK_VL4RE16_V)
+DECLARE_INSN(vl4re32_v, MATCH_VL4RE32_V, MASK_VL4RE32_V)
+DECLARE_INSN(vl4re64_v, MATCH_VL4RE64_V, MASK_VL4RE64_V)
+DECLARE_INSN(vl8re8_v, MATCH_VL8RE8_V, MASK_VL8RE8_V)
+DECLARE_INSN(vl8re16_v, MATCH_VL8RE16_V, MASK_VL8RE16_V)
+DECLARE_INSN(vl8re32_v, MATCH_VL8RE32_V, MASK_VL8RE32_V)
+DECLARE_INSN(vl8re64_v, MATCH_VL8RE64_V, MASK_VL8RE64_V)
+DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V)
+DECLARE_INSN(vs2r_v, MATCH_VS2R_V, MASK_VS2R_V)
+DECLARE_INSN(vs4r_v, MATCH_VS4R_V, MASK_VS4R_V)
+DECLARE_INSN(vs8r_v, MATCH_VS8R_V, MASK_VS8R_V)
+DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF)
+DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF)
+DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF)
+DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF)
+DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF)
+DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF)
+DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF)
+DECLARE_INSN(vfslide1up_vf, MATCH_VFSLIDE1UP_VF, MASK_VFSLIDE1UP_VF)
+DECLARE_INSN(vfslide1down_vf, MATCH_VFSLIDE1DOWN_VF, MASK_VFSLIDE1DOWN_VF)
+DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F)
+DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM)
+DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F)
+DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF)
+DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF)
+DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF)
+DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF)
+DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF)
+DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF)
+DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF)
+DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF)
+DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF)
+DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF)
+DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF)
+DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF)
+DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF)
+DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF)
+DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF)
+DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, MASK_VFNMACC_VF)
+DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF)
+DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF)
+DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF)
+DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF)
+DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF)
+DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF) +DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF) +DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF) +DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF) +DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF) +DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF) +DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV) +DECLARE_INSN(vfredusum_vs, MATCH_VFREDUSUM_VS, MASK_VFREDUSUM_VS) +DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV) +DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS) +DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV) +DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS) +DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV) +DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS) +DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV) +DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV) +DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV) +DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S) +DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV) +DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV) +DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV) +DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV) +DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV) +DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV) +DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV) +DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV) +DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV) +DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV) +DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV) +DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV) +DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV) +DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV) +DECLARE_INSN(vfcvt_xu_f_v, MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V) +DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V) +DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V) +DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V) +DECLARE_INSN(vfcvt_rtz_xu_f_v, MATCH_VFCVT_RTZ_XU_F_V, MASK_VFCVT_RTZ_XU_F_V) +DECLARE_INSN(vfcvt_rtz_x_f_v, MATCH_VFCVT_RTZ_X_F_V, MASK_VFCVT_RTZ_X_F_V) +DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V) +DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V) +DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V) +DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V) +DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V) +DECLARE_INSN(vfwcvt_rtz_xu_f_v, MATCH_VFWCVT_RTZ_XU_F_V, MASK_VFWCVT_RTZ_XU_F_V) +DECLARE_INSN(vfwcvt_rtz_x_f_v, MATCH_VFWCVT_RTZ_X_F_V, MASK_VFWCVT_RTZ_X_F_V) +DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W) +DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W) +DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W) +DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W) +DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W) +DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W) +DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W) +DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W) +DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V) +DECLARE_INSN(vfrsqrt7_v, MATCH_VFRSQRT7_V, MASK_VFRSQRT7_V) +DECLARE_INSN(vfrec7_v, MATCH_VFREC7_V, 
MASK_VFREC7_V) +DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V) +DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV) +DECLARE_INSN(vfwredusum_vs, MATCH_VFWREDUSUM_VS, MASK_VFWREDUSUM_VS) +DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV) +DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS) +DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV) +DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV) +DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV) +DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV) +DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV) +DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV) +DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV) +DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX) +DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX) +DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX) +DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX) +DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX) +DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX) +DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX) +DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX) +DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX) +DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX) +DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX) +DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX) +DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX) +DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM) +DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM) +DECLARE_INSN(vmadc_vx, MATCH_VMADC_VX, MASK_VMADC_VX) +DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM) +DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM) +DECLARE_INSN(vmsbc_vx, MATCH_VMSBC_VX, MASK_VMSBC_VX) +DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM) +DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X) +DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX) +DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX) +DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX) +DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX) +DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX) +DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX) +DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX) +DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX) +DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX) +DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX) +DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX) +DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX) +DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX) +DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX) +DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX) +DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX) +DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX) +DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX) +DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX) +DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX) +DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX) +DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX) +DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV) +DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV) +DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV) +DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV) +DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV) +DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV) +DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV) +DECLARE_INSN(vor_vv, 
MATCH_VOR_VV, MASK_VOR_VV) +DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV) +DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV) +DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV) +DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM) +DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM) +DECLARE_INSN(vmadc_vv, MATCH_VMADC_VV, MASK_VMADC_VV) +DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM) +DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM) +DECLARE_INSN(vmsbc_vv, MATCH_VMSBC_VV, MASK_VMSBC_VV) +DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM) +DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V) +DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV) +DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV) +DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV) +DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV) +DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV) +DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV) +DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV) +DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV) +DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV) +DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV) +DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV) +DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV) +DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV) +DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV) +DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV) +DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV) +DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV) +DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV) +DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV) +DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV) +DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS) +DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS) +DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI) +DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI) +DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI) +DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI) +DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI) +DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI) +DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI) +DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI) +DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM) +DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM) +DECLARE_INSN(vmadc_vi, MATCH_VMADC_VI, MASK_VMADC_VI) +DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM) +DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I) +DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI) +DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI) +DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI) +DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI) +DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI) +DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI) +DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI) +DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI) +DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI) +DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V) +DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V) +DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V) +DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V) +DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI) +DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI) +DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI) +DECLARE_INSN(vssra_vi, 
MATCH_VSSRA_VI, MASK_VSSRA_VI) +DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI) +DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI) +DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI) +DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI) +DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS) +DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS) +DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS) +DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS) +DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS) +DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS) +DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS) +DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS) +DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV) +DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV) +DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV) +DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV) +DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S) +DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8) +DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8) +DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4) +DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4) +DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2) +DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2) +DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM) +DECLARE_INSN(vmandn_mm, MATCH_VMANDN_MM, MASK_VMANDN_MM) +DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM) +DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM) +DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM) +DECLARE_INSN(vmorn_mm, MATCH_VMORN_MM, MASK_VMORN_MM) +DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM) +DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM) +DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM) +DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M) +DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M) +DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M) +DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M) +DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V) +DECLARE_INSN(vcpop_m, MATCH_VCPOP_M, MASK_VCPOP_M) +DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M) +DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV) +DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV) +DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV) +DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV) +DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV) +DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV) +DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV) +DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV) +DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV) +DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV) +DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV) +DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV) +DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV) +DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV) +DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV) +DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV) +DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV) +DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV) +DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV) +DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV) +DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV) +DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV) +DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV) 
+DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV) +DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV) +DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV) +DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX) +DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX) +DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX) +DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX) +DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X) +DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX) +DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX) +DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX) +DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX) +DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX) +DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX) +DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX) +DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX) +DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX) +DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX) +DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX) +DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX) +DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX) +DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX) +DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX) +DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX) +DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX) +DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX) +DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX) +DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX) +DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX) +DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX) +DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX) +DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX) +DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX) +DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX) +DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX) +DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX) +DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX) +DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V) +DECLARE_INSN(vamoaddei8_v, MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V) +DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V) +DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V) +DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V) +DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V) +DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V) +DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V) +DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V) +DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V) +DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V) +DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V) +DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V) +DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V) +DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V) +DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, MASK_VAMOMAXEI16_V) +DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V) +DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V) +DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V) +DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V) 
+DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V) +DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V) +DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V) +DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V) +DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V) +DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V) +DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V) +DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V) +DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V) +DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V) +DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V) +DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V) +DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V) +DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V) +DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V) +DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V) +DECLARE_INSN(add8, MATCH_ADD8, MASK_ADD8) +DECLARE_INSN(add16, MATCH_ADD16, MASK_ADD16) +DECLARE_INSN(add64, MATCH_ADD64, MASK_ADD64) +DECLARE_INSN(ave, MATCH_AVE, MASK_AVE) +DECLARE_INSN(bitrev, MATCH_BITREV, MASK_BITREV) +DECLARE_INSN(bitrevi, MATCH_BITREVI, MASK_BITREVI) +DECLARE_INSN(bpick, MATCH_BPICK, MASK_BPICK) +DECLARE_INSN(clrs8, MATCH_CLRS8, MASK_CLRS8) +DECLARE_INSN(clrs16, MATCH_CLRS16, MASK_CLRS16) +DECLARE_INSN(clrs32, MATCH_CLRS32, MASK_CLRS32) +DECLARE_INSN(clo8, MATCH_CLO8, MASK_CLO8) +DECLARE_INSN(clo16, MATCH_CLO16, MASK_CLO16) +DECLARE_INSN(clo32, MATCH_CLO32, MASK_CLO32) +DECLARE_INSN(clz8, MATCH_CLZ8, MASK_CLZ8) +DECLARE_INSN(clz16, MATCH_CLZ16, MASK_CLZ16) +DECLARE_INSN(clz32, MATCH_CLZ32, MASK_CLZ32) +DECLARE_INSN(cmpeq8, MATCH_CMPEQ8, MASK_CMPEQ8) +DECLARE_INSN(cmpeq16, MATCH_CMPEQ16, MASK_CMPEQ16) +DECLARE_INSN(cras16, MATCH_CRAS16, MASK_CRAS16) +DECLARE_INSN(crsa16, MATCH_CRSA16, MASK_CRSA16) +DECLARE_INSN(insb, MATCH_INSB, MASK_INSB) +DECLARE_INSN(kabs8, MATCH_KABS8, MASK_KABS8) +DECLARE_INSN(kabs16, MATCH_KABS16, MASK_KABS16) +DECLARE_INSN(kabsw, MATCH_KABSW, MASK_KABSW) +DECLARE_INSN(kadd8, MATCH_KADD8, MASK_KADD8) +DECLARE_INSN(kadd16, MATCH_KADD16, MASK_KADD16) +DECLARE_INSN(kadd64, MATCH_KADD64, MASK_KADD64) +DECLARE_INSN(kaddh, MATCH_KADDH, MASK_KADDH) +DECLARE_INSN(kaddw, MATCH_KADDW, MASK_KADDW) +DECLARE_INSN(kcras16, MATCH_KCRAS16, MASK_KCRAS16) +DECLARE_INSN(kcrsa16, MATCH_KCRSA16, MASK_KCRSA16) +DECLARE_INSN(kdmbb, MATCH_KDMBB, MASK_KDMBB) +DECLARE_INSN(kdmbt, MATCH_KDMBT, MASK_KDMBT) +DECLARE_INSN(kdmtt, MATCH_KDMTT, MASK_KDMTT) +DECLARE_INSN(kdmabb, MATCH_KDMABB, MASK_KDMABB) +DECLARE_INSN(kdmabt, MATCH_KDMABT, MASK_KDMABT) +DECLARE_INSN(kdmatt, MATCH_KDMATT, MASK_KDMATT) +DECLARE_INSN(khm8, MATCH_KHM8, MASK_KHM8) +DECLARE_INSN(khmx8, MATCH_KHMX8, MASK_KHMX8) +DECLARE_INSN(khm16, MATCH_KHM16, MASK_KHM16) +DECLARE_INSN(khmx16, MATCH_KHMX16, MASK_KHMX16) +DECLARE_INSN(khmbb, MATCH_KHMBB, MASK_KHMBB) +DECLARE_INSN(khmbt, MATCH_KHMBT, MASK_KHMBT) +DECLARE_INSN(khmtt, MATCH_KHMTT, MASK_KHMTT) +DECLARE_INSN(kmabb, MATCH_KMABB, MASK_KMABB) +DECLARE_INSN(kmabt, MATCH_KMABT, MASK_KMABT) +DECLARE_INSN(kmatt, MATCH_KMATT, MASK_KMATT) +DECLARE_INSN(kmada, MATCH_KMADA, MASK_KMADA) +DECLARE_INSN(kmaxda, MATCH_KMAXDA, MASK_KMAXDA) +DECLARE_INSN(kmads, MATCH_KMADS, MASK_KMADS) +DECLARE_INSN(kmadrs, MATCH_KMADRS, MASK_KMADRS) +DECLARE_INSN(kmaxds, 
MATCH_KMAXDS, MASK_KMAXDS) +DECLARE_INSN(kmar64, MATCH_KMAR64, MASK_KMAR64) +DECLARE_INSN(kmda, MATCH_KMDA, MASK_KMDA) +DECLARE_INSN(kmxda, MATCH_KMXDA, MASK_KMXDA) +DECLARE_INSN(kmmac, MATCH_KMMAC, MASK_KMMAC) +DECLARE_INSN(kmmac_u, MATCH_KMMAC_U, MASK_KMMAC_U) +DECLARE_INSN(kmmawb, MATCH_KMMAWB, MASK_KMMAWB) +DECLARE_INSN(kmmawb_u, MATCH_KMMAWB_U, MASK_KMMAWB_U) +DECLARE_INSN(kmmawb2, MATCH_KMMAWB2, MASK_KMMAWB2) +DECLARE_INSN(kmmawb2_u, MATCH_KMMAWB2_U, MASK_KMMAWB2_U) +DECLARE_INSN(kmmawt, MATCH_KMMAWT, MASK_KMMAWT) +DECLARE_INSN(kmmawt_u, MATCH_KMMAWT_U, MASK_KMMAWT_U) +DECLARE_INSN(kmmawt2, MATCH_KMMAWT2, MASK_KMMAWT2) +DECLARE_INSN(kmmawt2_u, MATCH_KMMAWT2_U, MASK_KMMAWT2_U) +DECLARE_INSN(kmmsb, MATCH_KMMSB, MASK_KMMSB) +DECLARE_INSN(kmmsb_u, MATCH_KMMSB_U, MASK_KMMSB_U) +DECLARE_INSN(kmmwb2, MATCH_KMMWB2, MASK_KMMWB2) +DECLARE_INSN(kmmwb2_u, MATCH_KMMWB2_U, MASK_KMMWB2_U) +DECLARE_INSN(kmmwt2, MATCH_KMMWT2, MASK_KMMWT2) +DECLARE_INSN(kmmwt2_u, MATCH_KMMWT2_U, MASK_KMMWT2_U) +DECLARE_INSN(kmsda, MATCH_KMSDA, MASK_KMSDA) +DECLARE_INSN(kmsxda, MATCH_KMSXDA, MASK_KMSXDA) +DECLARE_INSN(kmsr64, MATCH_KMSR64, MASK_KMSR64) +DECLARE_INSN(ksllw, MATCH_KSLLW, MASK_KSLLW) +DECLARE_INSN(kslliw, MATCH_KSLLIW, MASK_KSLLIW) +DECLARE_INSN(ksll8, MATCH_KSLL8, MASK_KSLL8) +DECLARE_INSN(kslli8, MATCH_KSLLI8, MASK_KSLLI8) +DECLARE_INSN(ksll16, MATCH_KSLL16, MASK_KSLL16) +DECLARE_INSN(kslli16, MATCH_KSLLI16, MASK_KSLLI16) +DECLARE_INSN(kslra8, MATCH_KSLRA8, MASK_KSLRA8) +DECLARE_INSN(kslra8_u, MATCH_KSLRA8_U, MASK_KSLRA8_U) +DECLARE_INSN(kslra16, MATCH_KSLRA16, MASK_KSLRA16) +DECLARE_INSN(kslra16_u, MATCH_KSLRA16_U, MASK_KSLRA16_U) +DECLARE_INSN(kslraw, MATCH_KSLRAW, MASK_KSLRAW) +DECLARE_INSN(kslraw_u, MATCH_KSLRAW_U, MASK_KSLRAW_U) +DECLARE_INSN(kstas16, MATCH_KSTAS16, MASK_KSTAS16) +DECLARE_INSN(kstsa16, MATCH_KSTSA16, MASK_KSTSA16) +DECLARE_INSN(ksub8, MATCH_KSUB8, MASK_KSUB8) +DECLARE_INSN(ksub16, MATCH_KSUB16, MASK_KSUB16) +DECLARE_INSN(ksub64, MATCH_KSUB64, MASK_KSUB64) +DECLARE_INSN(ksubh, MATCH_KSUBH, MASK_KSUBH) +DECLARE_INSN(ksubw, MATCH_KSUBW, MASK_KSUBW) +DECLARE_INSN(kwmmul, MATCH_KWMMUL, MASK_KWMMUL) +DECLARE_INSN(kwmmul_u, MATCH_KWMMUL_U, MASK_KWMMUL_U) +DECLARE_INSN(maddr32, MATCH_MADDR32, MASK_MADDR32) +DECLARE_INSN(maxw, MATCH_MAXW, MASK_MAXW) +DECLARE_INSN(minw, MATCH_MINW, MASK_MINW) +DECLARE_INSN(msubr32, MATCH_MSUBR32, MASK_MSUBR32) +DECLARE_INSN(mulr64, MATCH_MULR64, MASK_MULR64) +DECLARE_INSN(mulsr64, MATCH_MULSR64, MASK_MULSR64) +DECLARE_INSN(pbsad, MATCH_PBSAD, MASK_PBSAD) +DECLARE_INSN(pbsada, MATCH_PBSADA, MASK_PBSADA) +DECLARE_INSN(pkbb16, MATCH_PKBB16, MASK_PKBB16) +DECLARE_INSN(pkbt16, MATCH_PKBT16, MASK_PKBT16) +DECLARE_INSN(pktt16, MATCH_PKTT16, MASK_PKTT16) +DECLARE_INSN(pktb16, MATCH_PKTB16, MASK_PKTB16) +DECLARE_INSN(radd8, MATCH_RADD8, MASK_RADD8) +DECLARE_INSN(radd16, MATCH_RADD16, MASK_RADD16) +DECLARE_INSN(radd64, MATCH_RADD64, MASK_RADD64) +DECLARE_INSN(raddw, MATCH_RADDW, MASK_RADDW) +DECLARE_INSN(rcras16, MATCH_RCRAS16, MASK_RCRAS16) +DECLARE_INSN(rcrsa16, MATCH_RCRSA16, MASK_RCRSA16) +DECLARE_INSN(rstas16, MATCH_RSTAS16, MASK_RSTAS16) +DECLARE_INSN(rstsa16, MATCH_RSTSA16, MASK_RSTSA16) +DECLARE_INSN(rsub8, MATCH_RSUB8, MASK_RSUB8) +DECLARE_INSN(rsub16, MATCH_RSUB16, MASK_RSUB16) +DECLARE_INSN(rsub64, MATCH_RSUB64, MASK_RSUB64) +DECLARE_INSN(rsubw, MATCH_RSUBW, MASK_RSUBW) +DECLARE_INSN(sclip8, MATCH_SCLIP8, MASK_SCLIP8) +DECLARE_INSN(sclip16, MATCH_SCLIP16, MASK_SCLIP16) +DECLARE_INSN(sclip32, MATCH_SCLIP32, MASK_SCLIP32) +DECLARE_INSN(scmple8, MATCH_SCMPLE8, 
MASK_SCMPLE8) +DECLARE_INSN(scmple16, MATCH_SCMPLE16, MASK_SCMPLE16) +DECLARE_INSN(scmplt8, MATCH_SCMPLT8, MASK_SCMPLT8) +DECLARE_INSN(scmplt16, MATCH_SCMPLT16, MASK_SCMPLT16) +DECLARE_INSN(sll8, MATCH_SLL8, MASK_SLL8) +DECLARE_INSN(slli8, MATCH_SLLI8, MASK_SLLI8) +DECLARE_INSN(sll16, MATCH_SLL16, MASK_SLL16) +DECLARE_INSN(slli16, MATCH_SLLI16, MASK_SLLI16) +DECLARE_INSN(smal, MATCH_SMAL, MASK_SMAL) +DECLARE_INSN(smalbb, MATCH_SMALBB, MASK_SMALBB) +DECLARE_INSN(smalbt, MATCH_SMALBT, MASK_SMALBT) +DECLARE_INSN(smaltt, MATCH_SMALTT, MASK_SMALTT) +DECLARE_INSN(smalda, MATCH_SMALDA, MASK_SMALDA) +DECLARE_INSN(smalxda, MATCH_SMALXDA, MASK_SMALXDA) +DECLARE_INSN(smalds, MATCH_SMALDS, MASK_SMALDS) +DECLARE_INSN(smaldrs, MATCH_SMALDRS, MASK_SMALDRS) +DECLARE_INSN(smalxds, MATCH_SMALXDS, MASK_SMALXDS) +DECLARE_INSN(smar64, MATCH_SMAR64, MASK_SMAR64) +DECLARE_INSN(smaqa, MATCH_SMAQA, MASK_SMAQA) +DECLARE_INSN(smaqa_su, MATCH_SMAQA_SU, MASK_SMAQA_SU) +DECLARE_INSN(smax8, MATCH_SMAX8, MASK_SMAX8) +DECLARE_INSN(smax16, MATCH_SMAX16, MASK_SMAX16) +DECLARE_INSN(smbb16, MATCH_SMBB16, MASK_SMBB16) +DECLARE_INSN(smbt16, MATCH_SMBT16, MASK_SMBT16) +DECLARE_INSN(smtt16, MATCH_SMTT16, MASK_SMTT16) +DECLARE_INSN(smds, MATCH_SMDS, MASK_SMDS) +DECLARE_INSN(smdrs, MATCH_SMDRS, MASK_SMDRS) +DECLARE_INSN(smxds, MATCH_SMXDS, MASK_SMXDS) +DECLARE_INSN(smin8, MATCH_SMIN8, MASK_SMIN8) +DECLARE_INSN(smin16, MATCH_SMIN16, MASK_SMIN16) +DECLARE_INSN(smmul, MATCH_SMMUL, MASK_SMMUL) +DECLARE_INSN(smmul_u, MATCH_SMMUL_U, MASK_SMMUL_U) +DECLARE_INSN(smmwb, MATCH_SMMWB, MASK_SMMWB) +DECLARE_INSN(smmwb_u, MATCH_SMMWB_U, MASK_SMMWB_U) +DECLARE_INSN(smmwt, MATCH_SMMWT, MASK_SMMWT) +DECLARE_INSN(smmwt_u, MATCH_SMMWT_U, MASK_SMMWT_U) +DECLARE_INSN(smslda, MATCH_SMSLDA, MASK_SMSLDA) +DECLARE_INSN(smslxda, MATCH_SMSLXDA, MASK_SMSLXDA) +DECLARE_INSN(smsr64, MATCH_SMSR64, MASK_SMSR64) +DECLARE_INSN(smul8, MATCH_SMUL8, MASK_SMUL8) +DECLARE_INSN(smulx8, MATCH_SMULX8, MASK_SMULX8) +DECLARE_INSN(smul16, MATCH_SMUL16, MASK_SMUL16) +DECLARE_INSN(smulx16, MATCH_SMULX16, MASK_SMULX16) +DECLARE_INSN(sra_u, MATCH_SRA_U, MASK_SRA_U) +DECLARE_INSN(srai_u, MATCH_SRAI_U, MASK_SRAI_U) +DECLARE_INSN(sra8, MATCH_SRA8, MASK_SRA8) +DECLARE_INSN(sra8_u, MATCH_SRA8_U, MASK_SRA8_U) +DECLARE_INSN(srai8, MATCH_SRAI8, MASK_SRAI8) +DECLARE_INSN(srai8_u, MATCH_SRAI8_U, MASK_SRAI8_U) +DECLARE_INSN(sra16, MATCH_SRA16, MASK_SRA16) +DECLARE_INSN(sra16_u, MATCH_SRA16_U, MASK_SRA16_U) +DECLARE_INSN(srai16, MATCH_SRAI16, MASK_SRAI16) +DECLARE_INSN(srai16_u, MATCH_SRAI16_U, MASK_SRAI16_U) +DECLARE_INSN(srl8, MATCH_SRL8, MASK_SRL8) +DECLARE_INSN(srl8_u, MATCH_SRL8_U, MASK_SRL8_U) +DECLARE_INSN(srli8, MATCH_SRLI8, MASK_SRLI8) +DECLARE_INSN(srli8_u, MATCH_SRLI8_U, MASK_SRLI8_U) +DECLARE_INSN(srl16, MATCH_SRL16, MASK_SRL16) +DECLARE_INSN(srl16_u, MATCH_SRL16_U, MASK_SRL16_U) +DECLARE_INSN(srli16, MATCH_SRLI16, MASK_SRLI16) +DECLARE_INSN(srli16_u, MATCH_SRLI16_U, MASK_SRLI16_U) +DECLARE_INSN(stas16, MATCH_STAS16, MASK_STAS16) +DECLARE_INSN(stsa16, MATCH_STSA16, MASK_STSA16) +DECLARE_INSN(sub8, MATCH_SUB8, MASK_SUB8) +DECLARE_INSN(sub16, MATCH_SUB16, MASK_SUB16) +DECLARE_INSN(sub64, MATCH_SUB64, MASK_SUB64) +DECLARE_INSN(sunpkd810, MATCH_SUNPKD810, MASK_SUNPKD810) +DECLARE_INSN(sunpkd820, MATCH_SUNPKD820, MASK_SUNPKD820) +DECLARE_INSN(sunpkd830, MATCH_SUNPKD830, MASK_SUNPKD830) +DECLARE_INSN(sunpkd831, MATCH_SUNPKD831, MASK_SUNPKD831) +DECLARE_INSN(sunpkd832, MATCH_SUNPKD832, MASK_SUNPKD832) +DECLARE_INSN(swap8, MATCH_SWAP8, MASK_SWAP8) +DECLARE_INSN(uclip8, MATCH_UCLIP8, 
MASK_UCLIP8) +DECLARE_INSN(uclip16, MATCH_UCLIP16, MASK_UCLIP16) +DECLARE_INSN(uclip32, MATCH_UCLIP32, MASK_UCLIP32) +DECLARE_INSN(ucmple8, MATCH_UCMPLE8, MASK_UCMPLE8) +DECLARE_INSN(ucmple16, MATCH_UCMPLE16, MASK_UCMPLE16) +DECLARE_INSN(ucmplt8, MATCH_UCMPLT8, MASK_UCMPLT8) +DECLARE_INSN(ucmplt16, MATCH_UCMPLT16, MASK_UCMPLT16) +DECLARE_INSN(ukadd8, MATCH_UKADD8, MASK_UKADD8) +DECLARE_INSN(ukadd16, MATCH_UKADD16, MASK_UKADD16) +DECLARE_INSN(ukadd64, MATCH_UKADD64, MASK_UKADD64) +DECLARE_INSN(ukaddh, MATCH_UKADDH, MASK_UKADDH) +DECLARE_INSN(ukaddw, MATCH_UKADDW, MASK_UKADDW) +DECLARE_INSN(ukcras16, MATCH_UKCRAS16, MASK_UKCRAS16) +DECLARE_INSN(ukcrsa16, MATCH_UKCRSA16, MASK_UKCRSA16) +DECLARE_INSN(ukmar64, MATCH_UKMAR64, MASK_UKMAR64) +DECLARE_INSN(ukmsr64, MATCH_UKMSR64, MASK_UKMSR64) +DECLARE_INSN(ukstas16, MATCH_UKSTAS16, MASK_UKSTAS16) +DECLARE_INSN(ukstsa16, MATCH_UKSTSA16, MASK_UKSTSA16) +DECLARE_INSN(uksub8, MATCH_UKSUB8, MASK_UKSUB8) +DECLARE_INSN(uksub16, MATCH_UKSUB16, MASK_UKSUB16) +DECLARE_INSN(uksub64, MATCH_UKSUB64, MASK_UKSUB64) +DECLARE_INSN(uksubh, MATCH_UKSUBH, MASK_UKSUBH) +DECLARE_INSN(uksubw, MATCH_UKSUBW, MASK_UKSUBW) +DECLARE_INSN(umar64, MATCH_UMAR64, MASK_UMAR64) +DECLARE_INSN(umaqa, MATCH_UMAQA, MASK_UMAQA) +DECLARE_INSN(umax8, MATCH_UMAX8, MASK_UMAX8) +DECLARE_INSN(umax16, MATCH_UMAX16, MASK_UMAX16) +DECLARE_INSN(umin8, MATCH_UMIN8, MASK_UMIN8) +DECLARE_INSN(umin16, MATCH_UMIN16, MASK_UMIN16) +DECLARE_INSN(umsr64, MATCH_UMSR64, MASK_UMSR64) +DECLARE_INSN(umul8, MATCH_UMUL8, MASK_UMUL8) +DECLARE_INSN(umulx8, MATCH_UMULX8, MASK_UMULX8) +DECLARE_INSN(umul16, MATCH_UMUL16, MASK_UMUL16) +DECLARE_INSN(umulx16, MATCH_UMULX16, MASK_UMULX16) +DECLARE_INSN(uradd8, MATCH_URADD8, MASK_URADD8) +DECLARE_INSN(uradd16, MATCH_URADD16, MASK_URADD16) +DECLARE_INSN(uradd64, MATCH_URADD64, MASK_URADD64) +DECLARE_INSN(uraddw, MATCH_URADDW, MASK_URADDW) +DECLARE_INSN(urcras16, MATCH_URCRAS16, MASK_URCRAS16) +DECLARE_INSN(urcrsa16, MATCH_URCRSA16, MASK_URCRSA16) +DECLARE_INSN(urstas16, MATCH_URSTAS16, MASK_URSTAS16) +DECLARE_INSN(urstsa16, MATCH_URSTSA16, MASK_URSTSA16) +DECLARE_INSN(ursub8, MATCH_URSUB8, MASK_URSUB8) +DECLARE_INSN(ursub16, MATCH_URSUB16, MASK_URSUB16) +DECLARE_INSN(ursub64, MATCH_URSUB64, MASK_URSUB64) +DECLARE_INSN(ursubw, MATCH_URSUBW, MASK_URSUBW) +DECLARE_INSN(wexti, MATCH_WEXTI, MASK_WEXTI) +DECLARE_INSN(wext, MATCH_WEXT, MASK_WEXT) +DECLARE_INSN(zunpkd810, MATCH_ZUNPKD810, MASK_ZUNPKD810) +DECLARE_INSN(zunpkd820, MATCH_ZUNPKD820, MASK_ZUNPKD820) +DECLARE_INSN(zunpkd830, MATCH_ZUNPKD830, MASK_ZUNPKD830) +DECLARE_INSN(zunpkd831, MATCH_ZUNPKD831, MASK_ZUNPKD831) +DECLARE_INSN(zunpkd832, MATCH_ZUNPKD832, MASK_ZUNPKD832) +DECLARE_INSN(add32, MATCH_ADD32, MASK_ADD32) +DECLARE_INSN(cras32, MATCH_CRAS32, MASK_CRAS32) +DECLARE_INSN(crsa32, MATCH_CRSA32, MASK_CRSA32) +DECLARE_INSN(kabs32, MATCH_KABS32, MASK_KABS32) +DECLARE_INSN(kadd32, MATCH_KADD32, MASK_KADD32) +DECLARE_INSN(kcras32, MATCH_KCRAS32, MASK_KCRAS32) +DECLARE_INSN(kcrsa32, MATCH_KCRSA32, MASK_KCRSA32) +DECLARE_INSN(kdmbb16, MATCH_KDMBB16, MASK_KDMBB16) +DECLARE_INSN(kdmbt16, MATCH_KDMBT16, MASK_KDMBT16) +DECLARE_INSN(kdmtt16, MATCH_KDMTT16, MASK_KDMTT16) +DECLARE_INSN(kdmabb16, MATCH_KDMABB16, MASK_KDMABB16) +DECLARE_INSN(kdmabt16, MATCH_KDMABT16, MASK_KDMABT16) +DECLARE_INSN(kdmatt16, MATCH_KDMATT16, MASK_KDMATT16) +DECLARE_INSN(khmbb16, MATCH_KHMBB16, MASK_KHMBB16) +DECLARE_INSN(khmbt16, MATCH_KHMBT16, MASK_KHMBT16) +DECLARE_INSN(khmtt16, MATCH_KHMTT16, MASK_KHMTT16) +DECLARE_INSN(kmabb32, MATCH_KMABB32, 
MASK_KMABB32) +DECLARE_INSN(kmabt32, MATCH_KMABT32, MASK_KMABT32) +DECLARE_INSN(kmatt32, MATCH_KMATT32, MASK_KMATT32) +DECLARE_INSN(kmaxda32, MATCH_KMAXDA32, MASK_KMAXDA32) +DECLARE_INSN(kmda32, MATCH_KMDA32, MASK_KMDA32) +DECLARE_INSN(kmxda32, MATCH_KMXDA32, MASK_KMXDA32) +DECLARE_INSN(kmads32, MATCH_KMADS32, MASK_KMADS32) +DECLARE_INSN(kmadrs32, MATCH_KMADRS32, MASK_KMADRS32) +DECLARE_INSN(kmaxds32, MATCH_KMAXDS32, MASK_KMAXDS32) +DECLARE_INSN(kmsda32, MATCH_KMSDA32, MASK_KMSDA32) +DECLARE_INSN(kmsxda32, MATCH_KMSXDA32, MASK_KMSXDA32) +DECLARE_INSN(ksll32, MATCH_KSLL32, MASK_KSLL32) +DECLARE_INSN(kslli32, MATCH_KSLLI32, MASK_KSLLI32) +DECLARE_INSN(kslra32, MATCH_KSLRA32, MASK_KSLRA32) +DECLARE_INSN(kslra32_u, MATCH_KSLRA32_U, MASK_KSLRA32_U) +DECLARE_INSN(kstas32, MATCH_KSTAS32, MASK_KSTAS32) +DECLARE_INSN(kstsa32, MATCH_KSTSA32, MASK_KSTSA32) +DECLARE_INSN(ksub32, MATCH_KSUB32, MASK_KSUB32) +DECLARE_INSN(pkbb32, MATCH_PKBB32, MASK_PKBB32) +DECLARE_INSN(pkbt32, MATCH_PKBT32, MASK_PKBT32) +DECLARE_INSN(pktt32, MATCH_PKTT32, MASK_PKTT32) +DECLARE_INSN(pktb32, MATCH_PKTB32, MASK_PKTB32) +DECLARE_INSN(radd32, MATCH_RADD32, MASK_RADD32) +DECLARE_INSN(rcras32, MATCH_RCRAS32, MASK_RCRAS32) +DECLARE_INSN(rcrsa32, MATCH_RCRSA32, MASK_RCRSA32) +DECLARE_INSN(rstas32, MATCH_RSTAS32, MASK_RSTAS32) +DECLARE_INSN(rstsa32, MATCH_RSTSA32, MASK_RSTSA32) +DECLARE_INSN(rsub32, MATCH_RSUB32, MASK_RSUB32) +DECLARE_INSN(sll32, MATCH_SLL32, MASK_SLL32) +DECLARE_INSN(slli32, MATCH_SLLI32, MASK_SLLI32) +DECLARE_INSN(smax32, MATCH_SMAX32, MASK_SMAX32) +DECLARE_INSN(smbt32, MATCH_SMBT32, MASK_SMBT32) +DECLARE_INSN(smtt32, MATCH_SMTT32, MASK_SMTT32) +DECLARE_INSN(smds32, MATCH_SMDS32, MASK_SMDS32) +DECLARE_INSN(smdrs32, MATCH_SMDRS32, MASK_SMDRS32) +DECLARE_INSN(smxds32, MATCH_SMXDS32, MASK_SMXDS32) +DECLARE_INSN(smin32, MATCH_SMIN32, MASK_SMIN32) +DECLARE_INSN(sra32, MATCH_SRA32, MASK_SRA32) +DECLARE_INSN(sra32_u, MATCH_SRA32_U, MASK_SRA32_U) +DECLARE_INSN(srai32, MATCH_SRAI32, MASK_SRAI32) +DECLARE_INSN(srai32_u, MATCH_SRAI32_U, MASK_SRAI32_U) +DECLARE_INSN(sraiw_u, MATCH_SRAIW_U, MASK_SRAIW_U) +DECLARE_INSN(srl32, MATCH_SRL32, MASK_SRL32) +DECLARE_INSN(srl32_u, MATCH_SRL32_U, MASK_SRL32_U) +DECLARE_INSN(srli32, MATCH_SRLI32, MASK_SRLI32) +DECLARE_INSN(srli32_u, MATCH_SRLI32_U, MASK_SRLI32_U) +DECLARE_INSN(stas32, MATCH_STAS32, MASK_STAS32) +DECLARE_INSN(stsa32, MATCH_STSA32, MASK_STSA32) +DECLARE_INSN(sub32, MATCH_SUB32, MASK_SUB32) +DECLARE_INSN(ukadd32, MATCH_UKADD32, MASK_UKADD32) +DECLARE_INSN(ukcras32, MATCH_UKCRAS32, MASK_UKCRAS32) +DECLARE_INSN(ukcrsa32, MATCH_UKCRSA32, MASK_UKCRSA32) +DECLARE_INSN(ukstas32, MATCH_UKSTAS32, MASK_UKSTAS32) +DECLARE_INSN(ukstsa32, MATCH_UKSTSA32, MASK_UKSTSA32) +DECLARE_INSN(uksub32, MATCH_UKSUB32, MASK_UKSUB32) +DECLARE_INSN(umax32, MATCH_UMAX32, MASK_UMAX32) +DECLARE_INSN(umin32, MATCH_UMIN32, MASK_UMIN32) +DECLARE_INSN(uradd32, MATCH_URADD32, MASK_URADD32) +DECLARE_INSN(urcras32, MATCH_URCRAS32, MASK_URCRAS32) +DECLARE_INSN(urcrsa32, MATCH_URCRSA32, MASK_URCRSA32) +DECLARE_INSN(urstas32, MATCH_URSTAS32, MASK_URSTAS32) +DECLARE_INSN(urstsa32, MATCH_URSTSA32, MASK_URSTSA32) +DECLARE_INSN(ursub32, MATCH_URSUB32, MASK_URSUB32) +DECLARE_INSN(vmvnfr_v, MATCH_VMVNFR_V, MASK_VMVNFR_V) +DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V) +DECLARE_INSN(vl2r_v, MATCH_VL2R_V, MASK_VL2R_V) +DECLARE_INSN(vl4r_v, MATCH_VL4R_V, MASK_VL4R_V) +DECLARE_INSN(vl8r_v, MATCH_VL8R_V, MASK_VL8R_V) +DECLARE_INSN(vle1_v, MATCH_VLE1_V, MASK_VLE1_V) +DECLARE_INSN(vse1_v, MATCH_VSE1_V, MASK_VSE1_V) 
+DECLARE_INSN(vfredsum_vs, MATCH_VFREDSUM_VS, MASK_VFREDSUM_VS) +DECLARE_INSN(vfwredsum_vs, MATCH_VFWREDSUM_VS, MASK_VFWREDSUM_VS) +DECLARE_INSN(vpopc_m, MATCH_VPOPC_M, MASK_VPOPC_M) +DECLARE_INSN(vmornot_mm, MATCH_VMORNOT_MM, MASK_VMORNOT_MM) +DECLARE_INSN(vmandnot_mm, MATCH_VMANDNOT_MM, MASK_VMANDNOT_MM) +#endif +#ifdef DECLARE_CSR +DECLARE_CSR(fflags, CSR_FFLAGS) +DECLARE_CSR(frm, CSR_FRM) +DECLARE_CSR(fcsr, CSR_FCSR) +DECLARE_CSR(vstart, CSR_VSTART) +DECLARE_CSR(vxsat, CSR_VXSAT) +DECLARE_CSR(vxrm, CSR_VXRM) +DECLARE_CSR(vcsr, CSR_VCSR) +DECLARE_CSR(seed, CSR_SEED) +DECLARE_CSR(cycle, CSR_CYCLE) +DECLARE_CSR(time, CSR_TIME) +DECLARE_CSR(instret, CSR_INSTRET) +DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3) +DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4) +DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5) +DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6) +DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7) +DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8) +DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9) +DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10) +DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11) +DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12) +DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13) +DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14) +DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15) +DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16) +DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17) +DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18) +DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19) +DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20) +DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21) +DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22) +DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23) +DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24) +DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25) +DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26) +DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27) +DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28) +DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29) +DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30) +DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31) +DECLARE_CSR(vl, CSR_VL) +DECLARE_CSR(vtype, CSR_VTYPE) +DECLARE_CSR(vlenb, CSR_VLENB) +DECLARE_CSR(sstatus, CSR_SSTATUS) +DECLARE_CSR(sedeleg, CSR_SEDELEG) +DECLARE_CSR(sideleg, CSR_SIDELEG) +DECLARE_CSR(sie, CSR_SIE) +DECLARE_CSR(stvec, CSR_STVEC) +DECLARE_CSR(scounteren, CSR_SCOUNTEREN) +DECLARE_CSR(senvcfg, CSR_SENVCFG) +DECLARE_CSR(sscratch, CSR_SSCRATCH) +DECLARE_CSR(sepc, CSR_SEPC) +DECLARE_CSR(scause, CSR_SCAUSE) +DECLARE_CSR(stval, CSR_STVAL) +DECLARE_CSR(sip, CSR_SIP) +DECLARE_CSR(satp, CSR_SATP) +DECLARE_CSR(scontext, CSR_SCONTEXT) +DECLARE_CSR(vsstatus, CSR_VSSTATUS) +DECLARE_CSR(vsie, CSR_VSIE) +DECLARE_CSR(vstvec, CSR_VSTVEC) +DECLARE_CSR(vsscratch, CSR_VSSCRATCH) +DECLARE_CSR(vsepc, CSR_VSEPC) +DECLARE_CSR(vscause, CSR_VSCAUSE) +DECLARE_CSR(vstval, CSR_VSTVAL) +DECLARE_CSR(vsip, CSR_VSIP) +DECLARE_CSR(vsatp, CSR_VSATP) +DECLARE_CSR(hstatus, CSR_HSTATUS) +DECLARE_CSR(hedeleg, CSR_HEDELEG) +DECLARE_CSR(hideleg, CSR_HIDELEG) +DECLARE_CSR(hie, CSR_HIE) +DECLARE_CSR(htimedelta, CSR_HTIMEDELTA) +DECLARE_CSR(hcounteren, CSR_HCOUNTEREN) +DECLARE_CSR(hgeie, CSR_HGEIE) +DECLARE_CSR(henvcfg, CSR_HENVCFG) +DECLARE_CSR(htval, CSR_HTVAL) +DECLARE_CSR(hip, CSR_HIP) +DECLARE_CSR(hvip, CSR_HVIP) +DECLARE_CSR(htinst, CSR_HTINST) +DECLARE_CSR(hgatp, CSR_HGATP) +DECLARE_CSR(hcontext, CSR_HCONTEXT) +DECLARE_CSR(hgeip, CSR_HGEIP) +DECLARE_CSR(utvt, CSR_UTVT) +DECLARE_CSR(unxti, CSR_UNXTI) +DECLARE_CSR(uintstatus, CSR_UINTSTATUS) +DECLARE_CSR(uscratchcsw, CSR_USCRATCHCSW) +DECLARE_CSR(uscratchcswl, CSR_USCRATCHCSWL) +DECLARE_CSR(stvt, CSR_STVT) +DECLARE_CSR(snxti, 
CSR_SNXTI) +DECLARE_CSR(sintstatus, CSR_SINTSTATUS) +DECLARE_CSR(sscratchcsw, CSR_SSCRATCHCSW) +DECLARE_CSR(sscratchcswl, CSR_SSCRATCHCSWL) +DECLARE_CSR(mtvt, CSR_MTVT) +DECLARE_CSR(mnxti, CSR_MNXTI) +DECLARE_CSR(mintstatus, CSR_MINTSTATUS) +DECLARE_CSR(mscratchcsw, CSR_MSCRATCHCSW) +DECLARE_CSR(mscratchcswl, CSR_MSCRATCHCSWL) +DECLARE_CSR(mstatus, CSR_MSTATUS) +DECLARE_CSR(misa, CSR_MISA) +DECLARE_CSR(medeleg, CSR_MEDELEG) +DECLARE_CSR(mideleg, CSR_MIDELEG) +DECLARE_CSR(mie, CSR_MIE) +DECLARE_CSR(mtvec, CSR_MTVEC) +DECLARE_CSR(mcounteren, CSR_MCOUNTEREN) +DECLARE_CSR(menvcfg, CSR_MENVCFG) +DECLARE_CSR(mcountinhibit, CSR_MCOUNTINHIBIT) +DECLARE_CSR(mscratch, CSR_MSCRATCH) +DECLARE_CSR(mepc, CSR_MEPC) +DECLARE_CSR(mcause, CSR_MCAUSE) +DECLARE_CSR(mtval, CSR_MTVAL) +DECLARE_CSR(mip, CSR_MIP) +DECLARE_CSR(mtinst, CSR_MTINST) +DECLARE_CSR(mtval2, CSR_MTVAL2) +DECLARE_CSR(pmpcfg0, CSR_PMPCFG0) +DECLARE_CSR(pmpcfg1, CSR_PMPCFG1) +DECLARE_CSR(pmpcfg2, CSR_PMPCFG2) +DECLARE_CSR(pmpcfg3, CSR_PMPCFG3) +DECLARE_CSR(pmpcfg4, CSR_PMPCFG4) +DECLARE_CSR(pmpcfg5, CSR_PMPCFG5) +DECLARE_CSR(pmpcfg6, CSR_PMPCFG6) +DECLARE_CSR(pmpcfg7, CSR_PMPCFG7) +DECLARE_CSR(pmpcfg8, CSR_PMPCFG8) +DECLARE_CSR(pmpcfg9, CSR_PMPCFG9) +DECLARE_CSR(pmpcfg10, CSR_PMPCFG10) +DECLARE_CSR(pmpcfg11, CSR_PMPCFG11) +DECLARE_CSR(pmpcfg12, CSR_PMPCFG12) +DECLARE_CSR(pmpcfg13, CSR_PMPCFG13) +DECLARE_CSR(pmpcfg14, CSR_PMPCFG14) +DECLARE_CSR(pmpcfg15, CSR_PMPCFG15) +DECLARE_CSR(pmpaddr0, CSR_PMPADDR0) +DECLARE_CSR(pmpaddr1, CSR_PMPADDR1) +DECLARE_CSR(pmpaddr2, CSR_PMPADDR2) +DECLARE_CSR(pmpaddr3, CSR_PMPADDR3) +DECLARE_CSR(pmpaddr4, CSR_PMPADDR4) +DECLARE_CSR(pmpaddr5, CSR_PMPADDR5) +DECLARE_CSR(pmpaddr6, CSR_PMPADDR6) +DECLARE_CSR(pmpaddr7, CSR_PMPADDR7) +DECLARE_CSR(pmpaddr8, CSR_PMPADDR8) +DECLARE_CSR(pmpaddr9, CSR_PMPADDR9) +DECLARE_CSR(pmpaddr10, CSR_PMPADDR10) +DECLARE_CSR(pmpaddr11, CSR_PMPADDR11) +DECLARE_CSR(pmpaddr12, CSR_PMPADDR12) +DECLARE_CSR(pmpaddr13, CSR_PMPADDR13) +DECLARE_CSR(pmpaddr14, CSR_PMPADDR14) +DECLARE_CSR(pmpaddr15, CSR_PMPADDR15) +DECLARE_CSR(pmpaddr16, CSR_PMPADDR16) +DECLARE_CSR(pmpaddr17, CSR_PMPADDR17) +DECLARE_CSR(pmpaddr18, CSR_PMPADDR18) +DECLARE_CSR(pmpaddr19, CSR_PMPADDR19) +DECLARE_CSR(pmpaddr20, CSR_PMPADDR20) +DECLARE_CSR(pmpaddr21, CSR_PMPADDR21) +DECLARE_CSR(pmpaddr22, CSR_PMPADDR22) +DECLARE_CSR(pmpaddr23, CSR_PMPADDR23) +DECLARE_CSR(pmpaddr24, CSR_PMPADDR24) +DECLARE_CSR(pmpaddr25, CSR_PMPADDR25) +DECLARE_CSR(pmpaddr26, CSR_PMPADDR26) +DECLARE_CSR(pmpaddr27, CSR_PMPADDR27) +DECLARE_CSR(pmpaddr28, CSR_PMPADDR28) +DECLARE_CSR(pmpaddr29, CSR_PMPADDR29) +DECLARE_CSR(pmpaddr30, CSR_PMPADDR30) +DECLARE_CSR(pmpaddr31, CSR_PMPADDR31) +DECLARE_CSR(pmpaddr32, CSR_PMPADDR32) +DECLARE_CSR(pmpaddr33, CSR_PMPADDR33) +DECLARE_CSR(pmpaddr34, CSR_PMPADDR34) +DECLARE_CSR(pmpaddr35, CSR_PMPADDR35) +DECLARE_CSR(pmpaddr36, CSR_PMPADDR36) +DECLARE_CSR(pmpaddr37, CSR_PMPADDR37) +DECLARE_CSR(pmpaddr38, CSR_PMPADDR38) +DECLARE_CSR(pmpaddr39, CSR_PMPADDR39) +DECLARE_CSR(pmpaddr40, CSR_PMPADDR40) +DECLARE_CSR(pmpaddr41, CSR_PMPADDR41) +DECLARE_CSR(pmpaddr42, CSR_PMPADDR42) +DECLARE_CSR(pmpaddr43, CSR_PMPADDR43) +DECLARE_CSR(pmpaddr44, CSR_PMPADDR44) +DECLARE_CSR(pmpaddr45, CSR_PMPADDR45) +DECLARE_CSR(pmpaddr46, CSR_PMPADDR46) +DECLARE_CSR(pmpaddr47, CSR_PMPADDR47) +DECLARE_CSR(pmpaddr48, CSR_PMPADDR48) +DECLARE_CSR(pmpaddr49, CSR_PMPADDR49) +DECLARE_CSR(pmpaddr50, CSR_PMPADDR50) +DECLARE_CSR(pmpaddr51, CSR_PMPADDR51) +DECLARE_CSR(pmpaddr52, CSR_PMPADDR52) +DECLARE_CSR(pmpaddr53, CSR_PMPADDR53) +DECLARE_CSR(pmpaddr54, 
CSR_PMPADDR54) +DECLARE_CSR(pmpaddr55, CSR_PMPADDR55) +DECLARE_CSR(pmpaddr56, CSR_PMPADDR56) +DECLARE_CSR(pmpaddr57, CSR_PMPADDR57) +DECLARE_CSR(pmpaddr58, CSR_PMPADDR58) +DECLARE_CSR(pmpaddr59, CSR_PMPADDR59) +DECLARE_CSR(pmpaddr60, CSR_PMPADDR60) +DECLARE_CSR(pmpaddr61, CSR_PMPADDR61) +DECLARE_CSR(pmpaddr62, CSR_PMPADDR62) +DECLARE_CSR(pmpaddr63, CSR_PMPADDR63) +DECLARE_CSR(mseccfg, CSR_MSECCFG) +DECLARE_CSR(tselect, CSR_TSELECT) +DECLARE_CSR(tdata1, CSR_TDATA1) +DECLARE_CSR(tdata2, CSR_TDATA2) +DECLARE_CSR(tdata3, CSR_TDATA3) +DECLARE_CSR(tinfo, CSR_TINFO) +DECLARE_CSR(tcontrol, CSR_TCONTROL) +DECLARE_CSR(mcontext, CSR_MCONTEXT) +DECLARE_CSR(mscontext, CSR_MSCONTEXT) +DECLARE_CSR(dcsr, CSR_DCSR) +DECLARE_CSR(dpc, CSR_DPC) +DECLARE_CSR(dscratch0, CSR_DSCRATCH0) +DECLARE_CSR(dscratch1, CSR_DSCRATCH1) +DECLARE_CSR(mcycle, CSR_MCYCLE) +DECLARE_CSR(minstret, CSR_MINSTRET) +DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3) +DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4) +DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5) +DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6) +DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7) +DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8) +DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9) +DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10) +DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11) +DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12) +DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13) +DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14) +DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15) +DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16) +DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17) +DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18) +DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19) +DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20) +DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21) +DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22) +DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23) +DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24) +DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25) +DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26) +DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27) +DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28) +DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29) +DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30) +DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31) +DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3) +DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4) +DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5) +DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6) +DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7) +DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8) +DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9) +DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10) +DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11) +DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12) +DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13) +DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14) +DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15) +DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16) +DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17) +DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18) +DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19) +DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20) +DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21) +DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22) +DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23) +DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24) +DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25) +DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26) +DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27) +DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28) +DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29) +DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30) +DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31) +DECLARE_CSR(mvendorid, CSR_MVENDORID) +DECLARE_CSR(marchid, CSR_MARCHID) 
+DECLARE_CSR(mimpid, CSR_MIMPID) +DECLARE_CSR(mhartid, CSR_MHARTID) +DECLARE_CSR(mconfigptr, CSR_MCONFIGPTR) +DECLARE_CSR(htimedeltah, CSR_HTIMEDELTAH) +DECLARE_CSR(henvcfgh, CSR_HENVCFGH) +DECLARE_CSR(cycleh, CSR_CYCLEH) +DECLARE_CSR(timeh, CSR_TIMEH) +DECLARE_CSR(instreth, CSR_INSTRETH) +DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H) +DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H) +DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H) +DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H) +DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H) +DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H) +DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H) +DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H) +DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H) +DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H) +DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H) +DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H) +DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H) +DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H) +DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H) +DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H) +DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H) +DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H) +DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H) +DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H) +DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H) +DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H) +DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H) +DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H) +DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H) +DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H) +DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H) +DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H) +DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H) +DECLARE_CSR(mstatush, CSR_MSTATUSH) +DECLARE_CSR(menvcfgh, CSR_MENVCFGH) +DECLARE_CSR(mseccfgh, CSR_MSECCFGH) +DECLARE_CSR(mcycleh, CSR_MCYCLEH) +DECLARE_CSR(minstreth, CSR_MINSTRETH) +DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H) +DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H) +DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H) +DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H) +DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H) +DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H) +DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H) +DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H) +DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H) +DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H) +DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H) +DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H) +DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H) +DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H) +DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H) +DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H) +DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H) +DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H) +DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H) +DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H) +DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H) +DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H) +DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H) +DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H) +DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H) +DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H) +DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H) +DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H) +DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H) +#endif +#ifdef DECLARE_CAUSE +DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH) +DECLARE_CAUSE("fetch access", CAUSE_FETCH_ACCESS) +DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION) +DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT) +DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD) 
+DECLARE_CAUSE("load access", CAUSE_LOAD_ACCESS) +DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE) +DECLARE_CAUSE("store access", CAUSE_STORE_ACCESS) +DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL) +DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL) +DECLARE_CAUSE("virtual_supervisor_ecall", CAUSE_VIRTUAL_SUPERVISOR_ECALL) +DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL) +DECLARE_CAUSE("fetch page fault", CAUSE_FETCH_PAGE_FAULT) +DECLARE_CAUSE("load page fault", CAUSE_LOAD_PAGE_FAULT) +DECLARE_CAUSE("store page fault", CAUSE_STORE_PAGE_FAULT) +DECLARE_CAUSE("fetch guest page fault", CAUSE_FETCH_GUEST_PAGE_FAULT) +DECLARE_CAUSE("load guest page fault", CAUSE_LOAD_GUEST_PAGE_FAULT) +DECLARE_CAUSE("virtual instruction", CAUSE_VIRTUAL_INSTRUCTION) +DECLARE_CAUSE("store guest page fault", CAUSE_STORE_GUEST_PAGE_FAULT) +#endif diff --git a/vendor/riscv-isa-sim/riscv/entropy_source.h b/vendor/riscv-isa-sim/riscv/entropy_source.h new file mode 100644 index 00000000..47823ff7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/entropy_source.h @@ -0,0 +1,119 @@ + +#include +#include + +#include "internals.h" + +// +// Used to model the cryptography extension entropy source. +// See Section 4 of the Scalar Cryptography Extension specificaiton. +class entropy_source { + +public: + + // Valid return codes for OPST bits [31:30] when reading seed. + static const uint32_t OPST_BIST = 0x0 << 30; + static const uint32_t OPST_WAIT = 0x1 << 30; + static const uint32_t OPST_ES16 = 0x2 << 30; + static const uint32_t OPST_DEAD = 0x3 << 30; + + // + // Other system events + // ------------------------------------------------------------ + + void reset() { + // Does nothing for now. In the future, can be used to model things + // like initial BIST states. + } + + // + // seed register + // ------------------------------------------------------------ + + void set_seed(reg_t val) { + // Always ignore writes to seed. + // This CSR is strictly read only. It occupies a RW CSR address + // to handle the side-effect of the changing seed value on a read. + } + + + // + // The format of seed is described in Section 4.1 of + // the scalar cryptography specification. + reg_t get_seed() { + + uint32_t result = 0; + + // Currently, always return ES16 (i.e. good randomness) In the future, we + // can more realistically model things like WAIT states, BIST warm up and + // maybe scriptable entry into the DEAD state, but until then, this is + // the bare minimum. + uint32_t return_status = OPST_ES16; + + if(return_status == OPST_ES16) { + + // Add some sampled entropy into the low 16 bits + uint16_t entropy = this -> get_two_random_bytes(); + result |= entropy; + + } else if(return_status == OPST_BIST) { + + // Do nothing. + + } else if(return_status == OPST_WAIT) { + + // Do nothing. + + } else if(return_status == OPST_DEAD) { + + // Do nothing. Stay dead. + + } else { + + // Unreachable. + + } + + // Note that if we get here any return_status is anything other than + // OPST_ES16, then the low 16-bits of the return value must be zero. + + result |= return_status; + + // Result is zero-extended on RV64. + return (reg_t)result; + } + + // + // Utility / support variables and functions. + // ------------------------------------------------------------ + + // The file to read entropy from. + std::string randomness_source = "/dev/urandom"; + + // Read two random bytes from the entropy source file. 
+ uint16_t get_two_random_bytes() { + + std::ifstream fh(this -> randomness_source, std::ios::binary); + + if(fh.is_open()) { + + uint16_t random_bytes; + + fh.read((char*)(&random_bytes), 2); + + fh.close(); + + return random_bytes; + + } else { + + fprintf(stderr, "Could not open randomness source file:\n\t"); + fprintf(stderr, "%s", randomness_source.c_str()); + abort(); + + } + + } + +}; + diff --git a/vendor/riscv-isa-sim/riscv/execute.cc b/vendor/riscv-isa-sim/riscv/execute.cc new file mode 100644 index 00000000..98e3cdb0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/execute.cc @@ -0,0 +1,358 @@ +// See LICENSE for license details. + +#include "processor.h" +#include "mmu.h" +#include "disasm.h" +#include <cassert> + +#ifdef RISCV_ENABLE_COMMITLOG +static void commit_log_reset(processor_t* p) +{ + p->get_state()->log_reg_write.clear(); + p->get_state()->log_mem_read.clear(); + p->get_state()->log_mem_write.clear(); +} + +static void commit_log_stash_privilege(processor_t* p) +{ + state_t* state = p->get_state(); + state->last_inst_priv = state->prv; + state->last_inst_xlen = p->get_xlen(); + state->last_inst_flen = p->get_flen(); +} + +static void commit_log_print_value(FILE *log_file, int width, const void *data) +{ + assert(log_file); + + switch (width) { + case 8: + fprintf(log_file, "0x%01" PRIx8, *(const uint8_t *)data); + break; + case 16: + fprintf(log_file, "0x%04" PRIx16, *(const uint16_t *)data); + break; + case 32: + fprintf(log_file, "0x%08" PRIx32, *(const uint32_t *)data); + break; + case 64: + fprintf(log_file, "0x%016" PRIx64, *(const uint64_t *)data); + break; + default: + // wider values (e.g. vector registers) must be a power of two in width + if (((width - 1) & width) == 0) { + const uint64_t *arr = (const uint64_t *)data; + + fprintf(log_file, "0x"); + for (int idx = width / 64 - 1; idx >= 0; --idx) { + fprintf(log_file, "%016" PRIx64, arr[idx]); + } + } else { + abort(); + } + break; + } +} + +static void commit_log_print_value(FILE *log_file, int width, uint64_t val) +{ + commit_log_print_value(log_file, width, &val); +} + +const char* processor_t::get_symbol(uint64_t addr) +{ + return sim->get_symbol(addr); +} + +static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn) +{ + FILE *log_file = p->get_log_file(); + + auto& reg = p->get_state()->log_reg_write; + auto& load = p->get_state()->log_mem_read; + auto& store = p->get_state()->log_mem_write; + int priv = p->get_state()->last_inst_priv; + int xlen = p->get_state()->last_inst_xlen; + int flen = p->get_state()->last_inst_flen; + + // print core id on all lines so it is easy to grep + fprintf(log_file, "core%4" PRId32 ": ", p->get_id()); + + fprintf(log_file, "%1d ", priv); + commit_log_print_value(log_file, xlen, pc); + fprintf(log_file, " ("); + commit_log_print_value(log_file, insn.length() * 8, insn.bits()); + fprintf(log_file, ")"); + bool show_vec = false; + + for (auto item : reg) { + if (item.first == 0) + continue; + + char prefix; + int size; + int rd = item.first >> 4; + bool is_vec = false; + bool is_vreg = false; + switch (item.first & 0xf) { + case 0: + size = xlen; + prefix = 'x'; + break; + case 1: + size = flen; + prefix = 'f'; + break; + case 2: + size = p->VU.VLEN; + prefix = 'v'; + is_vreg = true; + break; + case 3: + is_vec = true; + break; + case 4: + size = xlen; + prefix = 'c'; + break; + default: + assert("can't be here" && 0); + break; + } + + if (!show_vec && (is_vreg || is_vec)) { + fprintf(log_file, " e%ld %s%ld l%ld", + p->VU.vsew, + p->VU.vflmul < 1 ? 
(reg_t)(1 / p->VU.vflmul) : (reg_t)p->VU.vflmul, + p->VU.vl->read()); + show_vec = true; + } + + if (!is_vec) { + if (prefix == 'c') + fprintf(log_file, " c%d_%s ", rd, csr_name(rd)); + else + fprintf(log_file, " %c%2d ", prefix, rd); + if (is_vreg) + commit_log_print_value(log_file, size, &p->VU.elt(rd, 0)); + else + commit_log_print_value(log_file, size, item.second.v); + } + } + + for (auto item : load) { + fprintf(log_file, " mem "); + commit_log_print_value(log_file, xlen, std::get<0>(item)); + } + + for (auto item : store) { + fprintf(log_file, " mem "); + commit_log_print_value(log_file, xlen, std::get<0>(item)); + fprintf(log_file, " "); + commit_log_print_value(log_file, std::get<2>(item) << 3, std::get<1>(item)); + } + fprintf(log_file, "\n"); +} +#else +static void commit_log_reset(processor_t* p) {} +static void commit_log_stash_privilege(processor_t* p) {} +static void commit_log_print_insn(processor_t* p, reg_t pc, insn_t insn) {} +#endif + +inline void processor_t::update_histogram(reg_t pc) +{ +#ifdef RISCV_ENABLE_HISTOGRAM + pc_histogram[pc]++; +#endif +} + +// This is expected to be inlined by the compiler so each use of execute_insn +// includes a duplicated body of the function to get separate fetch.func +// function calls. +static inline reg_t execute_insn(processor_t* p, reg_t pc, insn_fetch_t fetch) +{ + commit_log_reset(p); + commit_log_stash_privilege(p); + reg_t npc; + + try { + npc = fetch.func(p, fetch.insn, pc); + if (npc != PC_SERIALIZE_BEFORE) { + +#ifdef RISCV_ENABLE_COMMITLOG + if (p->get_log_commits_enabled()) { + commit_log_print_insn(p, pc, fetch.insn); + } +#endif + + } +#ifdef RISCV_ENABLE_COMMITLOG + } catch (wait_for_interrupt_t &t) { + if (p->get_log_commits_enabled()) { + commit_log_print_insn(p, pc, fetch.insn); + } + throw; + } catch(mem_trap_t& t) { + // handle a segfault in the middle of a vector load/store + if (p->get_log_commits_enabled()) { + for (auto item : p->get_state()->log_reg_write) { + if ((item.first & 3) == 3) { + commit_log_print_insn(p, pc, fetch.insn); + break; + } + } + } + throw; +#endif + } catch(...) { + throw; + } + p->update_histogram(pc); + + return npc; +} + +bool processor_t::slow_path() +{ + return debug || state.single_step != state.STEP_NONE || state.debug_mode; +} + +// fetch/decode/execute loop +void processor_t::step(size_t n) +{ + if (!state.debug_mode) { + if (halt_request == HR_REGULAR) { + enter_debug_mode(DCSR_CAUSE_DEBUGINT); + } else if (halt_request == HR_GROUP) { + enter_debug_mode(DCSR_CAUSE_GROUP); + } // !!!The halt bit in DCSR is deprecated. + else if (state.dcsr->halt) { + enter_debug_mode(DCSR_CAUSE_HALT); + } + } + + while (n > 0) { + size_t instret = 0; + reg_t pc = state.pc; + mmu_t* _mmu = mmu; + + #define advance_pc() \ + if (unlikely(invalid_pc(pc))) { \ + switch (pc) { \ + case PC_SERIALIZE_BEFORE: state.serialized = true; break; \ + case PC_SERIALIZE_AFTER: ++instret; break; \ + case PC_SERIALIZE_WFI: n = ++instret; break; \ + default: abort(); \ + } \ + pc = state.pc; \ + break; \ + } else { \ + state.pc = pc; \ + instret++; \ + } + + try + { + take_pending_interrupt(); + + if (unlikely(slow_path())) + { + // Main simulation loop, slow path. + while (instret < n) + { + if (unlikely(!state.serialized && state.single_step == state.STEP_STEPPED)) { + state.single_step = state.STEP_NONE; + if (!state.debug_mode) { + enter_debug_mode(DCSR_CAUSE_STEP); + // enter_debug_mode changed state.pc, so we can't just continue.
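+ // (For context: single_step is a three-state flag. STEP_STEPPING is
+ // requested when the hart resumes with the dcsr step bit set, the
+ // code further below downgrades it to STEP_STEPPED just before one
+ // instruction executes, and this STEP_STEPPED check then re-enters
+ // debug mode.)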
+ break; + } + } + + if (unlikely(state.single_step == state.STEP_STEPPING)) { + state.single_step = state.STEP_STEPPED; + } + + insn_fetch_t fetch = mmu->load_insn(pc); + if (debug && !state.serialized) + disasm(fetch.insn); + pc = execute_insn(this, pc, fetch); + advance_pc(); + } + } + else while (instret < n) + { + // Main simulation loop, fast path. + for (auto ic_entry = _mmu->access_icache(pc); ; ) { + auto fetch = ic_entry->data; + pc = execute_insn(this, pc, fetch); + ic_entry = ic_entry->next; + if (unlikely(ic_entry->tag != pc)) + break; + if (unlikely(instret + 1 == n)) + break; + instret++; + state.pc = pc; + } + + advance_pc(); + } + } + catch(trap_t& t) + { + take_trap(t, pc); + n = instret; + + if (unlikely(state.single_step == state.STEP_STEPPED)) { + state.single_step = state.STEP_NONE; + enter_debug_mode(DCSR_CAUSE_STEP); + } + } + catch (triggers::matched_t& t) + { + if (mmu->matched_trigger) { + // This exception came from the MMU. That means the instruction hasn't + // fully executed yet. We start it again, but this time it won't throw + // an exception because matched_trigger is already set. (All memory + // instructions are idempotent so restarting is safe.) + + insn_fetch_t fetch = mmu->load_insn(pc); + pc = execute_insn(this, pc, fetch); + advance_pc(); + + delete mmu->matched_trigger; + mmu->matched_trigger = NULL; + } + switch (t.action) { + case triggers::ACTION_DEBUG_MODE: + enter_debug_mode(DCSR_CAUSE_HWBP); + break; + case triggers::ACTION_DEBUG_EXCEPTION: { + trap_breakpoint trap(state.v, t.address); + take_trap(trap, pc); + break; + } + default: + abort(); + } + } + catch (wait_for_interrupt_t &t) + { + // Return to the outer simulation loop, which gives other devices/harts a + // chance to generate interrupts. + // + // In the debug ROM this prevents us from wasting time looping, but also + // allows us to switch to other threads only once per idle loop in case + // there is activity. + n = ++instret; + } + + state.minstret->bump(instret); + + // Model a hart whose CPI is 1. + state.mcycle->bump(instret); + + n -= instret; + } +} diff --git a/vendor/riscv-isa-sim/riscv/extension.cc b/vendor/riscv-isa-sim/riscv/extension.cc new file mode 100644 index 00000000..520c2ed5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/extension.cc @@ -0,0 +1,23 @@ +// See LICENSE for license details. + +#include "extension.h" +#include "trap.h" + +extension_t::~extension_t() +{ +} + +void extension_t::illegal_instruction() +{ + throw trap_illegal_instruction(0); +} + +void extension_t::raise_interrupt() +{ + p->take_interrupt((reg_t)1 << IRQ_COP); // must not return + throw std::logic_error("a COP exception was posted, but interrupts are disabled!"); +} + +void extension_t::clear_interrupt() +{ +} diff --git a/vendor/riscv-isa-sim/riscv/extension.h b/vendor/riscv-isa-sim/riscv/extension.h new file mode 100644 index 00000000..d1e847d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/extension.h @@ -0,0 +1,38 @@ +// See LICENSE for license details. 
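+//
+// Usage sketch (hypothetical example, not part of the vendored sources):
+// a custom extension subclasses extension_t and registers a factory under
+// a name that spike can then load via its --extension option, e.g.
+//
+//   class my_ext_t : public extension_t {
+//     std::vector<insn_desc_t> get_instructions() { return {}; }
+//     std::vector<disasm_insn_t*> get_disasms() { return {}; }
+//     const char* name() { return "my_ext"; }
+//   };
+//   REGISTER_EXTENSION(my_ext, []() { return new my_ext_t; })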
+ +#ifndef _RISCV_COPROCESSOR_H +#define _RISCV_COPROCESSOR_H + +#include "processor.h" +#include "disasm.h" +#include <vector> +#include <functional> + +class extension_t +{ + public: + virtual std::vector<insn_desc_t> get_instructions() = 0; + virtual std::vector<disasm_insn_t*> get_disasms() = 0; + virtual const char* name() = 0; + virtual void reset() {}; + virtual void set_debug(bool value) {}; + virtual ~extension_t(); + + void set_processor(processor_t* _p) { p = _p; } + protected: + processor_t* p; + + void illegal_instruction(); + void raise_interrupt(); + void clear_interrupt(); +}; + +std::function<extension_t*()> find_extension(const char* name); +void register_extension(const char* name, std::function<extension_t*()> f); + +#define REGISTER_EXTENSION(name, constructor) \ + class register_##name { \ + public: register_##name() { register_extension(#name, constructor); } \ + }; static register_##name dummy_##name; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/extensions.cc b/vendor/riscv-isa-sim/riscv/extensions.cc new file mode 100644 index 00000000..347dc5e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/extensions.cc @@ -0,0 +1,46 @@ +// See LICENSE for license details. + +#include "extension.h" +#include <string> +#include <map> +#include <dlfcn.h> + +static std::map<std::string, std::function<extension_t*()>>& extensions() +{ + static std::map<std::string, std::function<extension_t*()>> v; + return v; +} + +void register_extension(const char* name, std::function<extension_t*()> f) +{ + extensions()[name] = f; +} + +std::function<extension_t*()> find_extension(const char* name) +{ + if (!extensions().count(name)) { + // try to find extension xyz by loading libxyz.so + std::string libname = std::string("lib") + name + ".so"; + std::string libdefault = "libcustomext.so"; + bool is_default = false; + auto dlh = dlopen(libname.c_str(), RTLD_LAZY); + if (!dlh) { + dlh = dlopen(libdefault.c_str(), RTLD_LAZY); + if (!dlh) { + fprintf(stderr, "couldn't find shared library either '%s' or '%s'\n", + libname.c_str(), libdefault.c_str()); + exit(-1); + } + + is_default = true; + } + + if (!extensions().count(name)) { + fprintf(stderr, "couldn't find extension '%s' in shared library '%s'\n", + name, is_default ? libdefault.c_str() : libname.c_str()); + exit(-1); + } + } + + return extensions()[name]; +} diff --git a/vendor/riscv-isa-sim/riscv/insn_macros.h b/vendor/riscv-isa-sim/riscv/insn_macros.h new file mode 100644 index 00000000..2fdfcedc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insn_macros.h @@ -0,0 +1,9 @@ +#ifndef _RISCV_INSN_MACROS_H +#define _RISCV_INSN_MACROS_H + +// These conflict with Boost headers so can't be included from insn_template.h +#define P (*p) + +#define require(x) do { if (unlikely(!(x))) throw trap_illegal_instruction(insn.bits()); } while (0) + +#endif diff --git a/vendor/riscv-isa-sim/riscv/insn_template.cc b/vendor/riscv-isa-sim/riscv/insn_template.cc new file mode 100644 index 00000000..e6a2f52c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insn_template.cc @@ -0,0 +1,47 @@ +// See LICENSE for license details.
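+//
+// NAME and OPCODE below are placeholders: the build generates one
+// translation unit per instruction from this template, substituting the
+// instruction's name and its match opcode, and pulling the semantics in
+// from the corresponding insns/NAME.h fragment.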
+ +#include "insn_template.h" +#include "insn_macros.h" + +reg_t rv32i_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 32 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} + +reg_t rv64i_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 64 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} + +#undef CHECK_REG +#define CHECK_REG(reg) require((reg) < 16) + +reg_t rv32e_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 32 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} + +reg_t rv64e_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 64 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} diff --git a/vendor/riscv-isa-sim/riscv/insn_template.h b/vendor/riscv-isa-sim/riscv/insn_template.h new file mode 100644 index 00000000..3c36d10e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insn_template.h @@ -0,0 +1,9 @@ +// See LICENSE for license details. + +#include "arith.h" +#include "mmu.h" +#include "softfloat.h" +#include "internals.h" +#include "specialize.h" +#include "tracer.h" +#include diff --git a/vendor/riscv-isa-sim/riscv/insns/add.h b/vendor/riscv-isa-sim/riscv/insns/add.h new file mode 100644 index 00000000..895e2b18 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/add16.h b/vendor/riscv-isa-sim/riscv/insns/add16.h new file mode 100644 index 00000000..fae43165 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add32.h b/vendor/riscv-isa-sim/riscv/insns/add32.h new file mode 100644 index 00000000..ca544cef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add32.h @@ -0,0 +1,4 @@ +require_rv64; +P_LOOP(32, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add64.h b/vendor/riscv-isa-sim/riscv/insns/add64.h new file mode 100644 index 00000000..0968656f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add64.h @@ -0,0 +1,3 @@ +P_64_PROFILE({ + rd = rs1 + rs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add8.h b/vendor/riscv-isa-sim/riscv/insns/add8.h new file mode 100644 index 00000000..bb54a7b6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add_uw.h b/vendor/riscv-isa-sim/riscv/insns/add_uw.h new file mode 100644 index 00000000..5b25a367 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add_uw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen(zext32(RS1) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/addi.h b/vendor/riscv-isa-sim/riscv/insns/addi.h new file mode 100644 index 00000000..1bb5dced --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/addi.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/addiw.h b/vendor/riscv-isa-sim/riscv/insns/addiw.h new file mode 100644 index 00000000..4263eada --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/addiw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(insn.i_imm() + RS1)); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/addw.h b/vendor/riscv-isa-sim/riscv/insns/addw.h new file mode 100644 index 00000000..706dc9c8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/addw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(RS1 + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32dsi.h b/vendor/riscv-isa-sim/riscv/insns/aes32dsi.h new file mode 100644 index 00000000..b2680b01 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32dsi.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKND); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_DEC_SBOX[t0]; +uint32_t u = x; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h b/vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h new file mode 100644 index 00000000..d76abc08 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKND); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_DEC_SBOX[t0]; +uint32_t u ; + +u = (AES_GFMUL(x,0xb) << 24) | + (AES_GFMUL(x,0xd) << 16) | + (AES_GFMUL(x,0x9) << 8) | + (AES_GFMUL(x,0xe) << 0) ; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32esi.h b/vendor/riscv-isa-sim/riscv/insns/aes32esi.h new file mode 100644 index 00000000..d0c0a63b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32esi.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKNE); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_ENC_SBOX[t0]; +uint32_t u = x; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32esmi.h b/vendor/riscv-isa-sim/riscv/insns/aes32esmi.h new file mode 100644 index 00000000..069718d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32esmi.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKNE); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_ENC_SBOX[t0]; +uint32_t u ; + +u = (AES_GFMUL(x,3) << 24) | + ( x << 16) | + ( x << 8) | + (AES_GFMUL(x,2) << 0) ; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64ds.h b/vendor/riscv-isa-sim/riscv/insns/aes64ds.h new file mode 100644 index 00000000..64baf87a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64ds.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKND); + +uint64_t temp = AES_INVSHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_DEC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_DEC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_DEC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_DEC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_DEC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_DEC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_DEC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_DEC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +WRITE_RD(temp); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64dsm.h b/vendor/riscv-isa-sim/riscv/insns/aes64dsm.h new file mode 100644 index 00000000..eccf02fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64dsm.h @@ -0,0 +1,29 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKND); + +uint64_t temp = 
AES_INVSHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_DEC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_DEC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_DEC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_DEC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_DEC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_DEC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_DEC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_DEC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +uint32_t col_0 = temp & 0xFFFFFFFF; +uint32_t col_1 = temp >> 32 ; + + col_0 = AES_INVMIXCOLUMN(col_0); + col_1 = AES_INVMIXCOLUMN(col_1); + +uint64_t result= ((uint64_t)col_1 << 32) | col_0; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64es.h b/vendor/riscv-isa-sim/riscv/insns/aes64es.h new file mode 100644 index 00000000..6bbc4efe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64es.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKNE); + +uint64_t temp = AES_SHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_ENC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_ENC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_ENC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_ENC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +WRITE_RD(temp); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64esm.h b/vendor/riscv-isa-sim/riscv/insns/aes64esm.h new file mode 100644 index 00000000..0351c11b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64esm.h @@ -0,0 +1,29 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKNE); + +uint64_t temp = AES_SHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_ENC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_ENC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_ENC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_ENC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +uint32_t col_0 = temp & 0xFFFFFFFF; +uint32_t col_1 = temp >> 32 ; + + col_0 = AES_MIXCOLUMN(col_0); + col_1 = AES_MIXCOLUMN(col_1); + +uint64_t result= ((uint64_t)col_1 << 32) | col_0; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64im.h b/vendor/riscv-isa-sim/riscv/insns/aes64im.h new file mode 100644 index 00000000..9dd9b021 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64im.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKND); + +uint32_t col_0 = RS1 & 0xFFFFFFFF; +uint32_t col_1 = RS1 >> 32 ; + + col_0 = AES_INVMIXCOLUMN(col_0); + col_1 = AES_INVMIXCOLUMN(col_1); + +uint64_t result= ((uint64_t)col_1 << 32) | col_0; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h b/vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h new file mode 100644 index 00000000..fff7109c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h @@ -0,0 +1,38 @@ + +#include "aes_common.h" + +require_rv64; +require_either_extension(EXT_ZKND, EXT_ZKNE); + +uint8_t round_consts [10] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 +}; + +uint8_t enc_rcon = insn.rcon() ; + +if(enc_rcon > 0xA) { + 
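// rcon values 0x0-0x9 select an AES-128 round constant from round_consts above; + // 0xA gives the SubWord-only step used by AES-256 key expansion (per the scalar-crypto spec); larger values are reserved. +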
// Invalid opcode. + throw trap_illegal_instruction(0); +} + +uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF ; +uint8_t rcon = 0 ; +uint64_t result ; + +if(enc_rcon != 0xA) { + temp = (temp >> 8) | (temp << 24); // Rotate right by 8 + rcon = round_consts[enc_rcon]; +} + +temp = + ((uint32_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint32_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint32_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint32_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0) ; + +temp ^= rcon; + +result = ((uint64_t)temp << 32) | temp; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64ks2.h b/vendor/riscv-isa-sim/riscv/insns/aes64ks2.h new file mode 100644 index 00000000..65d5a77c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64ks2.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv64; +require_either_extension(EXT_ZKND, EXT_ZKNE); + +uint32_t rs1_hi = RS1 >> 32; +uint32_t rs2_lo = RS2 ; +uint32_t rs2_hi = RS2 >> 32; + +uint32_t r_lo = (rs1_hi ^ rs2_lo ) ; +uint32_t r_hi = (rs1_hi ^ rs2_lo ^ rs2_hi) ; +uint64_t result = ((uint64_t)r_hi << 32) | r_lo ; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes_common.h b/vendor/riscv-isa-sim/riscv/insns/aes_common.h new file mode 100644 index 00000000..9cc353c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes_common.h @@ -0,0 +1,156 @@ + +uint8_t AES_ENC_SBOX[]= { + 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, + 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, + 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, + 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, + 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, + 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, + 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, + 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, + 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, + 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, + 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, + 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, + 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, + 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, + 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, + 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, + 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, + 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, + 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, + 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, + 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, + 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, + 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, + 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, + 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, + 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, + 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, + 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, + 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, + 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, + 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, + 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 +}; + +uint8_t AES_DEC_SBOX[] = { + 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, + 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, + 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, + 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, + 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, + 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, + 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, + 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, + 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, + 0x6C, 
0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, + 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, + 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, + 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, + 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, + 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, + 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, + 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, + 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, + 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, + 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, + 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, + 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, + 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, + 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, + 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, + 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, + 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, + 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, + 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, + 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D +}; + +#define AES_UNPACK_BYTES(b0,b1,b2,b3) \ + uint8_t b0 = (RS1 >> 0) & 0xFF; \ + uint8_t b1 = (RS2 >> 8) & 0xFF; \ + uint8_t b2 = (RS1 >> 16) & 0xFF; \ + uint8_t b3 = (RS2 >> 24) & 0xFF; \ + +#define AES_PACK_BYTES(b0,b1,b2,b3) ( \ + (uint32_t)b0 << 0 | \ + (uint32_t)b1 << 8 | \ + (uint32_t)b2 << 16 | \ + (uint32_t)b3 << 24 ) + +#define AES_SBOX(b0, b1, b2, b3) \ + b0 = AES_ENC_SBOX[b0]; \ + b1 = AES_ENC_SBOX[b1]; \ + b2 = AES_ENC_SBOX[b2]; \ + b3 = AES_ENC_SBOX[b3]; \ + +#define AES_RSBOX(b0, b1, b2, b3) \ + b0 = AES_DEC_SBOX[b0]; \ + b1 = AES_DEC_SBOX[b1]; \ + b2 = AES_DEC_SBOX[b2]; \ + b3 = AES_DEC_SBOX[b3]; \ + +#define AES_XTIME(a) \ + ((a << 1) ^ ((a&0x80) ? 0x1b : 0)) + +#define AES_GFMUL(a,b) (( \ + ( ( (b) & 0x1 ) ? (a) : 0 ) ^ \ + ( ( (b) & 0x2 ) ? AES_XTIME(a) : 0 ) ^ \ + ( ( (b) & 0x4 ) ? AES_XTIME(AES_XTIME(a)) : 0 ) ^ \ + ( ( (b) & 0x8 ) ? 
AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0 ) )&0xFF) + +#define BY(X,I) ((X >> (8*I)) & 0xFF) + +#define AES_SHIFROWS_LO(RS1,RS2) ( \ + (((RS1 >> 24) & 0xFF) << 56) | \ + (((RS2 >> 48) & 0xFF) << 48) | \ + (((RS2 >> 8) & 0xFF) << 40) | \ + (((RS1 >> 32) & 0xFF) << 32) | \ + (((RS2 >> 56) & 0xFF) << 24) | \ + (((RS2 >> 16) & 0xFF) << 16) | \ + (((RS1 >> 40) & 0xFF) << 8) | \ + (((RS1 >> 0) & 0xFF) << 0) ) + +#define AES_INVSHIFROWS_LO(RS1,RS2) ( \ + (((RS2 >> 24) & 0xFF) << 56) | \ + (((RS2 >> 48) & 0xFF) << 48) | \ + (((RS1 >> 8) & 0xFF) << 40) | \ + (((RS1 >> 32) & 0xFF) << 32) | \ + (((RS1 >> 56) & 0xFF) << 24) | \ + (((RS2 >> 16) & 0xFF) << 16) | \ + (((RS2 >> 40) & 0xFF) << 8) | \ + (((RS1 >> 0) & 0xFF) << 0) ) + + +#define AES_MIXBYTE(COL,B0,B1,B2,B3) ( \ + BY(COL,B3) ^ \ + BY(COL,B2) ^ \ + AES_GFMUL(BY(COL,B1), 3) ^ \ + AES_GFMUL(BY(COL,B0), 2) \ +) + +#define AES_MIXCOLUMN(COL) ( \ + AES_MIXBYTE(COL,3,0,1,2) << 24 | \ + AES_MIXBYTE(COL,2,3,0,1) << 16 | \ + AES_MIXBYTE(COL,1,2,3,0) << 8 | \ + AES_MIXBYTE(COL,0,1,2,3) << 0 \ +) + + +#define AES_INVMIXBYTE(COL,B0,B1,B2,B3) ( \ + AES_GFMUL(BY(COL,B3),0x9) ^ \ + AES_GFMUL(BY(COL,B2),0xd) ^ \ + AES_GFMUL(BY(COL,B1),0xb) ^ \ + AES_GFMUL(BY(COL,B0),0xe) \ +) + +#define AES_INVMIXCOLUMN(COL) ( \ + AES_INVMIXBYTE(COL,3,0,1,2) << 24 | \ + AES_INVMIXBYTE(COL,2,3,0,1) << 16 | \ + AES_INVMIXBYTE(COL,1,2,3,0) << 8 | \ + AES_INVMIXBYTE(COL,0,1,2,3) << 0 \ +) + diff --git a/vendor/riscv-isa-sim/riscv/insns/amoadd_d.h b/vendor/riscv-isa-sim/riscv/insns/amoadd_d.h new file mode 100644 index 00000000..6090fbc5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoadd_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs + RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoadd_w.h b/vendor/riscv-isa-sim/riscv/insns/amoadd_w.h new file mode 100644 index 00000000..2c6471af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoadd_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs + RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoand_d.h b/vendor/riscv-isa-sim/riscv/insns/amoand_d.h new file mode 100644 index 00000000..80aea184 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoand_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs & RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoand_w.h b/vendor/riscv-isa-sim/riscv/insns/amoand_w.h new file mode 100644 index 00000000..f7e1ba7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoand_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs & RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomax_d.h b/vendor/riscv-isa-sim/riscv/insns/amomax_d.h new file mode 100644 index 00000000..496d8ada --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomax_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::max(lhs, int64_t(RS2)); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomax_w.h b/vendor/riscv-isa-sim/riscv/insns/amomax_w.h new file mode 100644 index 00000000..757bdd2c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomax_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::max(lhs, int32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h 
b/vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h new file mode 100644 index 00000000..12b17331 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::max(lhs, RS2); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h b/vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h new file mode 100644 index 00000000..538df1c4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::max(lhs, uint32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomin_d.h b/vendor/riscv-isa-sim/riscv/insns/amomin_d.h new file mode 100644 index 00000000..725d9839 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomin_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::min(lhs, int64_t(RS2)); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomin_w.h b/vendor/riscv-isa-sim/riscv/insns/amomin_w.h new file mode 100644 index 00000000..ee53faa0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomin_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::min(lhs, int32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amominu_d.h b/vendor/riscv-isa-sim/riscv/insns/amominu_d.h new file mode 100644 index 00000000..15b6c0a4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amominu_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::min(lhs, RS2); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amominu_w.h b/vendor/riscv-isa-sim/riscv/insns/amominu_w.h new file mode 100644 index 00000000..52e1141b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amominu_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::min(lhs, uint32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoor_d.h b/vendor/riscv-isa-sim/riscv/insns/amoor_d.h new file mode 100644 index 00000000..de876274 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoor_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs | RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoor_w.h b/vendor/riscv-isa-sim/riscv/insns/amoor_w.h new file mode 100644 index 00000000..3455981d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoor_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs | RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoswap_d.h b/vendor/riscv-isa-sim/riscv/insns/amoswap_d.h new file mode 100644 index 00000000..e1bffdeb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoswap_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoswap_w.h b/vendor/riscv-isa-sim/riscv/insns/amoswap_w.h new file mode 100644 index 00000000..0f78369c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoswap_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoxor_d.h b/vendor/riscv-isa-sim/riscv/insns/amoxor_d.h new file mode 100644 index 
00000000..1b3c0bf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoxor_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs ^ RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoxor_w.h b/vendor/riscv-isa-sim/riscv/insns/amoxor_w.h new file mode 100644 index 00000000..a1ea82f1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoxor_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs ^ RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/and.h b/vendor/riscv-isa-sim/riscv/insns/and.h new file mode 100644 index 00000000..86b48831 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/and.h @@ -0,0 +1 @@ +WRITE_RD(RS1 & RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/andi.h b/vendor/riscv-isa-sim/riscv/insns/andi.h new file mode 100644 index 00000000..bcc51e44 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/andi.h @@ -0,0 +1 @@ +WRITE_RD(insn.i_imm() & RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/andn.h b/vendor/riscv-isa-sim/riscv/insns/andn.h new file mode 100644 index 00000000..8add1919 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/andn.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +WRITE_RD(RS1 & ~RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/auipc.h b/vendor/riscv-isa-sim/riscv/insns/auipc.h new file mode 100644 index 00000000..1a2b169b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/auipc.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(insn.u_imm() + pc)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ave.h b/vendor/riscv-isa-sim/riscv/insns/ave.h new file mode 100644 index 00000000..59979002 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ave.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZPN); +sreg_t rs1 = RS1; +sreg_t rs2 = RS2; +sreg_t carry = (rs1 & 1) | (rs2 & 1); +WRITE_RD(sext_xlen((rs1 >> 1) + (rs2 >> 1) + carry)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bclr.h b/vendor/riscv-isa-sim/riscv/insns/bclr.h new file mode 100644 index 00000000..589273e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bclr.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(RS1 & ~(1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bclri.h b/vendor/riscv-isa-sim/riscv/insns/bclri.h new file mode 100644 index 00000000..8df6a5f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bclri.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(RS1 & ~(1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bcompress.h b/vendor/riscv-isa-sim/riscv/insns/bcompress.h new file mode 100644 index 00000000..579346f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bcompress.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext_xlen(RS1), mask = zext_xlen(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data & b) >> (ctz(b) - i); + i += popcount(b); + mask -= b; +} +WRITE_RD(sext_xlen(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bcompressw.h b/vendor/riscv-isa-sim/riscv/insns/bcompressw.h new file mode 100644 index 00000000..2c1017cd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bcompressw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext32(RS1), mask = zext32(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data & b) >> (ctz(b) - i); + i += popcount(b); + mask 
-= b; +} +WRITE_RD(sext32(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bdecompress.h b/vendor/riscv-isa-sim/riscv/insns/bdecompress.h new file mode 100644 index 00000000..2894be01 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bdecompress.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext_xlen(RS1), mask = zext_xlen(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data << (ctz(b) - i)) & b; + i += popcount(b); + mask -= b; +} +WRITE_RD(sext_xlen(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bdecompressw.h b/vendor/riscv-isa-sim/riscv/insns/bdecompressw.h new file mode 100644 index 00000000..468a7260 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bdecompressw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext32(RS1), mask = zext32(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data << (ctz(b) - i)) & b; + i += popcount(b); + mask -= b; +} +WRITE_RD(sext32(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/beq.h b/vendor/riscv-isa-sim/riscv/insns/beq.h new file mode 100644 index 00000000..fd7e0614 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/beq.h @@ -0,0 +1,2 @@ +if(RS1 == RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bext.h b/vendor/riscv-isa-sim/riscv/insns/bext.h new file mode 100644 index 00000000..24c80b07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bext.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(1 & (RS1 >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bexti.h b/vendor/riscv-isa-sim/riscv/insns/bexti.h new file mode 100644 index 00000000..31d23166 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bexti.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(1 & (RS1 >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bfp.h b/vendor/riscv-isa-sim/riscv/insns/bfp.h new file mode 100644 index 00000000..886d8405 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bfp.h @@ -0,0 +1,10 @@ +require_extension(EXT_XZBF); +reg_t cfg = RS2 >> (xlen/2); +if ((cfg >> 30) == 2) + cfg = cfg >> 16; +int len = (cfg >> 8) & (xlen/2-1); +int off = cfg & (xlen-1); +len = len ? len : xlen/2; +reg_t mask = ~(~reg_t(0) << len) << off; +reg_t data = RS2 << off; +WRITE_RD(sext_xlen((data & mask) | (RS1 & ~mask))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bfpw.h b/vendor/riscv-isa-sim/riscv/insns/bfpw.h new file mode 100644 index 00000000..42479e72 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bfpw.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_XZBF); +reg_t cfg = RS2 >> 16; +int len = (cfg >> 8) & 15; +int off = cfg & 31; +len = len ? 
len : 16; +reg_t mask = ~(~reg_t(0) << len) << off; +reg_t data = RS2 << off; +WRITE_RD(sext32((data & mask) | (RS1 & ~mask))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bge.h b/vendor/riscv-isa-sim/riscv/insns/bge.h new file mode 100644 index 00000000..da0c68e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bge.h @@ -0,0 +1,2 @@ +if(sreg_t(RS1) >= sreg_t(RS2)) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bgeu.h b/vendor/riscv-isa-sim/riscv/insns/bgeu.h new file mode 100644 index 00000000..d764a347 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bgeu.h @@ -0,0 +1,2 @@ +if(RS1 >= RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/binv.h b/vendor/riscv-isa-sim/riscv/insns/binv.h new file mode 100644 index 00000000..cef5b780 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/binv.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(RS1 ^ (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/binvi.h b/vendor/riscv-isa-sim/riscv/insns/binvi.h new file mode 100644 index 00000000..3272d393 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/binvi.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(RS1 ^ (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/blt.h b/vendor/riscv-isa-sim/riscv/insns/blt.h new file mode 100644 index 00000000..c54fb769 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/blt.h @@ -0,0 +1,2 @@ +if(sreg_t(RS1) < sreg_t(RS2)) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bltu.h b/vendor/riscv-isa-sim/riscv/insns/bltu.h new file mode 100644 index 00000000..ff75e8a6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bltu.h @@ -0,0 +1,2 @@ +if(RS1 < RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bmatflip.h b/vendor/riscv-isa-sim/riscv/insns/bmatflip.h new file mode 100644 index 00000000..c10df8f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bmatflip.h @@ -0,0 +1,11 @@ +require_rv64; +require_extension(EXT_XZBM); +reg_t x = RS1; +for (int i = 0; i < 3; i++) { + x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); + x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); + x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); + x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); + x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +} +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bmator.h b/vendor/riscv-isa-sim/riscv/insns/bmator.h new file mode 100644 index 00000000..33057ca0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bmator.h @@ -0,0 +1,29 @@ +require_rv64; +require_extension(EXT_XZBM); + +// transpose of rs2 +int64_t rs2t = RS2; +for (int i = 0; i < 3; i++) { + rs2t = (rs2t & 0xFFFF00000000FFFFLL) | ((rs2t & 0x0000FFFF00000000LL) >> 16) | ((rs2t & 0x00000000FFFF0000LL) << 16); + rs2t = (rs2t & 0xFF0000FFFF0000FFLL) | ((rs2t & 0x00FF000000FF0000LL) >> 8) | ((rs2t & 0x0000FF000000FF00LL) << 8); + rs2t = (rs2t & 0xF00FF00FF00FF00FLL) | ((rs2t & 0x0F000F000F000F00LL) >> 4) | ((rs2t & 0x00F000F000F000F0LL) << 4); + rs2t = (rs2t & 0xC3C3C3C3C3C3C3C3LL) | ((rs2t & 0x3030303030303030LL) >> 2) | ((rs2t & 0x0C0C0C0C0C0C0C0CLL) << 2); + rs2t = (rs2t & 
0x9999999999999999LL) | ((rs2t & 0x4444444444444444LL) >> 1) | ((rs2t & 0x2222222222222222LL) << 1); +} + +int64_t rs1 = RS1; +uint8_t u[8]; // rows of rs1 +uint8_t v[8]; // cols of rs2 + +for (int i = 0; i < 8; i++) { + u[i] = rs1 >> (i*8); + v[i] = rs2t >> (i*8); +} + +uint64_t x = 0; +for (int i = 0; i < 64; i++) { + if ((u[i / 8] & v[i % 8]) != 0) + x |= 1LL << i; +} + +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bmatxor.h b/vendor/riscv-isa-sim/riscv/insns/bmatxor.h new file mode 100644 index 00000000..ca2d0967 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bmatxor.h @@ -0,0 +1,29 @@ +require_rv64; +require_extension(EXT_XZBM); + +// transpose of rs2 +int64_t rs2t = RS2; +for (int i = 0; i < 3; i++) { + rs2t = (rs2t & 0xFFFF00000000FFFFLL) | ((rs2t & 0x0000FFFF00000000LL) >> 16) | ((rs2t & 0x00000000FFFF0000LL) << 16); + rs2t = (rs2t & 0xFF0000FFFF0000FFLL) | ((rs2t & 0x00FF000000FF0000LL) >> 8) | ((rs2t & 0x0000FF000000FF00LL) << 8); + rs2t = (rs2t & 0xF00FF00FF00FF00FLL) | ((rs2t & 0x0F000F000F000F00LL) >> 4) | ((rs2t & 0x00F000F000F000F0LL) << 4); + rs2t = (rs2t & 0xC3C3C3C3C3C3C3C3LL) | ((rs2t & 0x3030303030303030LL) >> 2) | ((rs2t & 0x0C0C0C0C0C0C0C0CLL) << 2); + rs2t = (rs2t & 0x9999999999999999LL) | ((rs2t & 0x4444444444444444LL) >> 1) | ((rs2t & 0x2222222222222222LL) << 1); +} + +int64_t rs1 = RS1; +uint8_t u[8]; // rows of rs1 +uint8_t v[8]; // cols of rs2 + +for (int i = 0; i < 8; i++) { + u[i] = rs1 >> (i*8); + v[i] = rs2t >> (i*8); +} + +uint64_t x = 0; +for (int i = 0; i < 64; i++) { + if (popcount(u[i / 8] & v[i % 8]) & 1) + x |= 1LL << i; +} + +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bne.h b/vendor/riscv-isa-sim/riscv/insns/bne.h new file mode 100644 index 00000000..1e6cb7c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bne.h @@ -0,0 +1,2 @@ +if(RS1 != RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bset.h b/vendor/riscv-isa-sim/riscv/insns/bset.h new file mode 100644 index 00000000..9009fb32 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bset.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(RS1 | (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bseti.h b/vendor/riscv-isa-sim/riscv/insns/bseti.h new file mode 100644 index 00000000..49523786 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bseti.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(RS1 | (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_add.h b/vendor/riscv-isa-sim/riscv/insns/c_add.h new file mode 100644 index 00000000..ab7d4d4c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_add.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rs2() != 0); +WRITE_RD(sext_xlen(RVC_RS1 + RVC_RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_addi.h b/vendor/riscv-isa-sim/riscv/insns/c_addi.h new file mode 100644 index 00000000..eb983442 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_addi.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RD(sext_xlen(RVC_RS1 + insn.rvc_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h b/vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h new file mode 100644 index 00000000..e5f3832f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_addi4spn_imm() != 0); +WRITE_RVC_RS2S(sext_xlen(RVC_SP + insn.rvc_addi4spn_imm())); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/c_addw.h b/vendor/riscv-isa-sim/riscv/insns/c_addw.h new file mode 100644 index 00000000..6e0ae3a5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_addw.h @@ -0,0 +1,3 @@ +require_extension('C'); +require_rv64; +WRITE_RVC_RS1S(sext32(RVC_RS1S + RVC_RS2S)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_and.h b/vendor/riscv-isa-sim/riscv/insns/c_and.h new file mode 100644 index 00000000..4d7bab6c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_and.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S & RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_andi.h b/vendor/riscv-isa-sim/riscv/insns/c_andi.h new file mode 100644 index 00000000..9de5a1ac --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_andi.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S & insn.rvc_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_beqz.h b/vendor/riscv-isa-sim/riscv/insns/c_beqz.h new file mode 100644 index 00000000..35c11960 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_beqz.h @@ -0,0 +1,3 @@ +require_extension('C'); +if (RVC_RS1S == 0) + set_pc(pc + insn.rvc_b_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_bnez.h b/vendor/riscv-isa-sim/riscv/insns/c_bnez.h new file mode 100644 index 00000000..1e40ea78 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_bnez.h @@ -0,0 +1,3 @@ +require_extension('C'); +if (RVC_RS1S != 0) + set_pc(pc + insn.rvc_b_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_ebreak.h b/vendor/riscv-isa-sim/riscv/insns/c_ebreak.h new file mode 100644 index 00000000..7d04f46d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_ebreak.h @@ -0,0 +1,2 @@ +require_extension('C'); +throw trap_breakpoint(STATE.v, pc); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fld.h b/vendor/riscv-isa-sim/riscv/insns/c_fld.h new file mode 100644 index 00000000..319615b8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fld.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +WRITE_RVC_FRS2S(f64(MMU.load_uint64(RVC_RS1S + insn.rvc_ld_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fldsp.h b/vendor/riscv-isa-sim/riscv/insns/c_fldsp.h new file mode 100644 index 00000000..534eef7d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fldsp.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +WRITE_FRD(f64(MMU.load_uint64(RVC_SP + insn.rvc_ldsp_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_flw.h b/vendor/riscv-isa-sim/riscv/insns/c_flw.h new file mode 100644 index 00000000..682566c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_flw.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + WRITE_RVC_FRS2S(f32(MMU.load_uint32(RVC_RS1S + insn.rvc_lw_imm()))); +} else { // c.ld + WRITE_RVC_RS2S(MMU.load_int64(RVC_RS1S + insn.rvc_ld_imm())); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_flwsp.h b/vendor/riscv-isa-sim/riscv/insns/c_flwsp.h new file mode 100644 index 00000000..79058c40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_flwsp.h @@ -0,0 +1,9 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + WRITE_FRD(f32(MMU.load_uint32(RVC_SP + insn.rvc_lwsp_imm()))); +} else { // c.ldsp + require(insn.rvc_rd() != 0); + WRITE_RD(MMU.load_int64(RVC_SP + insn.rvc_ldsp_imm())); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fsd.h b/vendor/riscv-isa-sim/riscv/insns/c_fsd.h new file mode 100644 index 
00000000..6f2c8f4c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fsd.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +MMU.store_uint64(RVC_RS1S + insn.rvc_ld_imm(), RVC_FRS2S.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h b/vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h new file mode 100644 index 00000000..27b93319 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +MMU.store_uint64(RVC_SP + insn.rvc_sdsp_imm(), RVC_FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fsw.h b/vendor/riscv-isa-sim/riscv/insns/c_fsw.h new file mode 100644 index 00000000..70858229 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fsw.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + MMU.store_uint32(RVC_RS1S + insn.rvc_lw_imm(), RVC_FRS2S.v[0]); +} else { // c.sd + MMU.store_uint64(RVC_RS1S + insn.rvc_ld_imm(), RVC_RS2S); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fswsp.h b/vendor/riscv-isa-sim/riscv/insns/c_fswsp.h new file mode 100644 index 00000000..c5a003fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fswsp.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + MMU.store_uint32(RVC_SP + insn.rvc_swsp_imm(), RVC_FRS2.v[0]); +} else { // c.sdsp + MMU.store_uint64(RVC_SP + insn.rvc_sdsp_imm(), RVC_RS2); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_j.h b/vendor/riscv-isa-sim/riscv/insns/c_j.h new file mode 100644 index 00000000..6d8939c4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_j.h @@ -0,0 +1,2 @@ +require_extension('C'); +set_pc(pc + insn.rvc_j_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_jal.h b/vendor/riscv-isa-sim/riscv/insns/c_jal.h new file mode 100644 index 00000000..4f156f61 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_jal.h @@ -0,0 +1,9 @@ +require_extension('C'); +if (xlen == 32) { + reg_t tmp = npc; + set_pc(pc + insn.rvc_j_imm()); + WRITE_REG(X_RA, tmp); +} else { // c.addiw + require(insn.rvc_rd() != 0); + WRITE_RD(sext32(RVC_RS1 + insn.rvc_imm())); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_jalr.h b/vendor/riscv-isa-sim/riscv/insns/c_jalr.h new file mode 100644 index 00000000..cb1e4222 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_jalr.h @@ -0,0 +1,5 @@ +require_extension('C'); +require(insn.rvc_rs1() != 0); +reg_t tmp = npc; +set_pc(RVC_RS1 & ~reg_t(1)); +WRITE_REG(X_RA, tmp); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_jr.h b/vendor/riscv-isa-sim/riscv/insns/c_jr.h new file mode 100644 index 00000000..9c4a8ea9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_jr.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rs1() != 0); +set_pc(RVC_RS1 & ~reg_t(1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_li.h b/vendor/riscv-isa-sim/riscv/insns/c_li.h new file mode 100644 index 00000000..f9fd66b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_li.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RD(insn.rvc_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_lui.h b/vendor/riscv-isa-sim/riscv/insns/c_lui.h new file mode 100644 index 00000000..75d8eb89 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_lui.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (insn.rvc_rd() == 2) { // c.addi16sp + require(insn.rvc_addi16sp_imm() != 0); + WRITE_REG(X_SP, sext_xlen(RVC_SP + insn.rvc_addi16sp_imm())); +} else { + require(insn.rvc_imm() != 0); + 
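// rvc_imm() is already sign-extended, so shifting left by 12 yields the usual lui-style constant. +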
WRITE_RD(insn.rvc_imm() << 12); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_lw.h b/vendor/riscv-isa-sim/riscv/insns/c_lw.h new file mode 100644 index 00000000..ef49dd90 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_lw.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS2S(MMU.load_int32(RVC_RS1S + insn.rvc_lw_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_lwsp.h b/vendor/riscv-isa-sim/riscv/insns/c_lwsp.h new file mode 100644 index 00000000..b3d74dbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_lwsp.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rd() != 0); +WRITE_RD(MMU.load_int32(RVC_SP + insn.rvc_lwsp_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_mv.h b/vendor/riscv-isa-sim/riscv/insns/c_mv.h new file mode 100644 index 00000000..a03d0d07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_mv.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rs2() != 0); +WRITE_RD(RVC_RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_or.h b/vendor/riscv-isa-sim/riscv/insns/c_or.h new file mode 100644 index 00000000..56436d1a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_or.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S | RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_slli.h b/vendor/riscv-isa-sim/riscv/insns/c_slli.h new file mode 100644 index 00000000..24fbb133 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_slli.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_zimm() < xlen); +WRITE_RD(sext_xlen(RVC_RS1 << insn.rvc_zimm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_srai.h b/vendor/riscv-isa-sim/riscv/insns/c_srai.h new file mode 100644 index 00000000..f6638b1e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_srai.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_zimm() < xlen); +WRITE_RVC_RS1S(sext_xlen(sext_xlen(RVC_RS1S) >> insn.rvc_zimm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_srli.h b/vendor/riscv-isa-sim/riscv/insns/c_srli.h new file mode 100644 index 00000000..f410fefd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_srli.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_zimm() < xlen); +WRITE_RVC_RS1S(sext_xlen(zext_xlen(RVC_RS1S) >> insn.rvc_zimm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_sub.h b/vendor/riscv-isa-sim/riscv/insns/c_sub.h new file mode 100644 index 00000000..1b8e3735 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_sub.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(sext_xlen(RVC_RS1S - RVC_RS2S)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_subw.h b/vendor/riscv-isa-sim/riscv/insns/c_subw.h new file mode 100644 index 00000000..580f5b54 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_subw.h @@ -0,0 +1,3 @@ +require_extension('C'); +require_rv64; +WRITE_RVC_RS1S(sext32(RVC_RS1S - RVC_RS2S)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_sw.h b/vendor/riscv-isa-sim/riscv/insns/c_sw.h new file mode 100644 index 00000000..3073e9d6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_sw.h @@ -0,0 +1,2 @@ +require_extension('C'); +MMU.store_uint32(RVC_RS1S + insn.rvc_lw_imm(), RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_swsp.h b/vendor/riscv-isa-sim/riscv/insns/c_swsp.h new file mode 100644 index 00000000..b8995ab0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_swsp.h @@ -0,0 +1,2 @@ +require_extension('C'); +MMU.store_uint32(RVC_SP + insn.rvc_swsp_imm(), RVC_RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_xor.h 
b/vendor/riscv-isa-sim/riscv/insns/c_xor.h new file mode 100644 index 00000000..9981c1af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_xor.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S ^ RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_clean.h b/vendor/riscv-isa-sim/riscv/insns/cbo_clean.h new file mode 100644 index 00000000..201fa447 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_clean.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZICBOM); +DECLARE_XENVCFG_VARS(CBCFE); +require_envcfg(CBCFE); +MMU.clean_inval(RS1, true, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_flush.h b/vendor/riscv-isa-sim/riscv/insns/cbo_flush.h new file mode 100644 index 00000000..b17f5cf1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_flush.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZICBOM); +DECLARE_XENVCFG_VARS(CBCFE); +require_envcfg(CBCFE); +MMU.clean_inval(RS1, true, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_inval.h b/vendor/riscv-isa-sim/riscv/insns/cbo_inval.h new file mode 100644 index 00000000..bd80a6fd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_inval.h @@ -0,0 +1,9 @@ +require_extension(EXT_ZICBOM); +DECLARE_XENVCFG_VARS(CBIE); +require_envcfg(CBIE); +if (((STATE.prv != PRV_M) && (mCBIE == 1)) || + ((!STATE.v && (STATE.prv == PRV_U)) && (sCBIE == 1)) || + (STATE.v && ((hCBIE == 1) || ((STATE.prv == PRV_U) && (sCBIE == 1))))) + MMU.clean_inval(RS1, true, true); +else + MMU.clean_inval(RS1, false, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_zero.h b/vendor/riscv-isa-sim/riscv/insns/cbo_zero.h new file mode 100644 index 00000000..4bbe28d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_zero.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZICBOZ); +DECLARE_XENVCFG_VARS(CBZE); +require_envcfg(CBZE); +MMU.cbo_zero(RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmul.h b/vendor/riscv-isa-sim/riscv/insns/clmul.h new file mode 100644 index 00000000..b8e6d6d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmul.h @@ -0,0 +1,6 @@ +require_either_extension(EXT_ZBC, EXT_ZBKC); +reg_t a = zext_xlen(RS1), b = zext_xlen(RS2), x = 0; +for (int i = 0; i < xlen; i++) + if ((b >> i) & 1) + x ^= a << i; +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulh.h b/vendor/riscv-isa-sim/riscv/insns/clmulh.h new file mode 100644 index 00000000..dfee94e2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulh.h @@ -0,0 +1,6 @@ +require_either_extension(EXT_ZBC, EXT_ZBKC); +reg_t a = zext_xlen(RS1), b = zext_xlen(RS2), x = 0; +for (int i = 1; i < xlen; i++) + if ((b >> i) & 1) + x ^= a >> (xlen-i); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulhw.h b/vendor/riscv-isa-sim/riscv/insns/clmulhw.h new file mode 100644 index 00000000..f41acb0e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulhw.h @@ -0,0 +1,6 @@ +require_extension(EXT_XZBC); +reg_t a = zext32(RS1), b = zext32(RS2), x = 0; +for (int i = 1; i < 32; i++) + if ((b >> i) & 1) + x ^= a >> (32-i); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulr.h b/vendor/riscv-isa-sim/riscv/insns/clmulr.h new file mode 100644 index 00000000..ffa046d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulr.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZBC); +reg_t a = zext_xlen(RS1), b = zext_xlen(RS2), x = 0; +for (int i = 0; i < xlen; i++) + if ((b >> i) & 1) + x ^= a >> (xlen-i-1); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulrw.h
b/vendor/riscv-isa-sim/riscv/insns/clmulrw.h new file mode 100644 index 00000000..784859ae --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulrw.h @@ -0,0 +1,6 @@ +require_extension(EXT_XZBC); +reg_t a = zext32(RS1), b = zext32(RS2), x = 0; +for (int i = 0; i < 32; i++) + if ((b >> i) & 1) + x ^= a >> (31-i); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulw.h b/vendor/riscv-isa-sim/riscv/insns/clmulw.h new file mode 100644 index 00000000..5bb753fe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulw.h @@ -0,0 +1,6 @@ +require_extension(EXT_XZBC); +reg_t a = zext32(RS1), b = zext32(RS2), x = 0; +for (int i = 0; i < 32; i++) + if ((b >> i) & 1) + x ^= a << i; +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clo16.h b/vendor/riscv-isa-sim/riscv/insns/clo16.h new file mode 100644 index 00000000..9da65993 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clo16.h @@ -0,0 +1,11 @@ +P_ONE_LOOP(16, { + pd = 0; + ps1 = ~ps1; + if (!ps1) pd = 16; + else { + if ((ps1 & 0xFF00) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x8000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clo32.h b/vendor/riscv-isa-sim/riscv/insns/clo32.h new file mode 100644 index 00000000..431bb0e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clo32.h @@ -0,0 +1,12 @@ +P_ONE_LOOP(32, { + pd = 0; + ps1 = ~ps1; + if (!ps1) pd = 32; + else { + if ((ps1 & 0xFFFF0000) == 0) { pd += 16; ps1 <<= 16; } + if ((ps1 & 0xFF000000) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF0000000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0000000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80000000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clo8.h b/vendor/riscv-isa-sim/riscv/insns/clo8.h new file mode 100644 index 00000000..2581adec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clo8.h @@ -0,0 +1,10 @@ +P_ONE_LOOP(8, { + pd = 0; + ps1 = ~ps1; + if (!ps1) pd = 8; + else { + if ((ps1 & 0xF0) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clrs16.h b/vendor/riscv-isa-sim/riscv/insns/clrs16.h new file mode 100644 index 00000000..65412629 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clrs16.h @@ -0,0 +1,12 @@ +P_ONE_LOOP(16, { + pd = 0; + if (ps1 < 0) ps1 = ~ps1; + if (!ps1) pd = 16; + else { + if ((ps1 & 0xFF00) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x8000) == 0) { pd += 1; } + } + pd -= 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clrs32.h b/vendor/riscv-isa-sim/riscv/insns/clrs32.h new file mode 100644 index 00000000..c75db180 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clrs32.h @@ -0,0 +1,13 @@ +P_ONE_LOOP(32, { + pd = 0; + if (ps1 < 0) ps1 = ~ps1; + if (!ps1) pd = 32; + else { + if ((ps1 & 0xFFFF0000) == 0) { pd += 16; ps1 <<= 16; } + if ((ps1 & 0xFF000000) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF0000000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0000000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80000000) == 0) { pd += 1; } + } + pd -= 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clrs8.h b/vendor/riscv-isa-sim/riscv/insns/clrs8.h new file mode 100644 index 00000000..f6f82987 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clrs8.h @@ -0,0 +1,11 
@@ +P_ONE_LOOP(8, { + pd = 0; + if (ps1 < 0) ps1 = ~ps1; + if (!ps1) pd = 8; + else { + if ((ps1 & 0xF0) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80) == 0) { pd += 1; } + } + pd -= 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clz.h b/vendor/riscv-isa-sim/riscv/insns/clz.h new file mode 100644 index 00000000..e10e4d2d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz.h @@ -0,0 +1,5 @@ +require_either_extension(xlen == 32 ? EXT_ZBPBO : EXT_ZBB, EXT_ZBB); +reg_t x = xlen; +for (int i = 0; i < xlen; i++) + if (1 & (RS1 >> (xlen-i-1))) { x = i; break; } +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clz16.h b/vendor/riscv-isa-sim/riscv/insns/clz16.h new file mode 100644 index 00000000..a129d59a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz16.h @@ -0,0 +1,10 @@ +P_ONE_LOOP(16, { + pd = 0; + if (ps1 == 0) pd = 16; + else { + if ((ps1 & 0xFF00) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x8000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clz32.h b/vendor/riscv-isa-sim/riscv/insns/clz32.h new file mode 100644 index 00000000..a38dda76 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz32.h @@ -0,0 +1,12 @@ +require_rv64; +P_ONE_LOOP(32, { + pd = 0; + if (ps1 == 0) pd = 32; + else { + if ((ps1 & 0xFFFF0000) == 0) { pd += 16; ps1 <<= 16; } + if ((ps1 & 0xFF000000) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF0000000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0000000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80000000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clz8.h b/vendor/riscv-isa-sim/riscv/insns/clz8.h new file mode 100644 index 00000000..78ff6b7b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz8.h @@ -0,0 +1,9 @@ +P_ONE_LOOP(8, { + pd = 0; + if (ps1 == 0) pd = 8; + else { + if ((ps1 & 0xF0) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clzw.h b/vendor/riscv-isa-sim/riscv/insns/clzw.h new file mode 100644 index 00000000..46816e77 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clzw.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_ZBB); +reg_t x = 32; +for (int i = 0; i < 32; i++) + if (1 & (RS1 >> (31-i))) { x = i; break; } +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cmix.h b/vendor/riscv-isa-sim/riscv/insns/cmix.h new file mode 100644 index 00000000..98eb0bca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmix.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBPBO, EXT_XZBT); +WRITE_RD((RS1 & RS2) | (RS3 & ~RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cmov.h b/vendor/riscv-isa-sim/riscv/insns/cmov.h new file mode 100644 index 00000000..c7551bc6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmov.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBT); +WRITE_RD(RS2 ? RS1 : RS3); diff --git a/vendor/riscv-isa-sim/riscv/insns/cmpeq16.h b/vendor/riscv-isa-sim/riscv/insns/cmpeq16.h new file mode 100644 index 00000000..4fb6faab --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmpeq16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 == ps2) ? 
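cmix above is a per-bit select: rs2 is the mask, rs1 supplies the bits where the mask is 1 and rs3 the bits where it is 0; cmov is the whole-register analogue keyed on rs2 != 0. A quick cross-check of the expression (a plain C++ sketch; the function and operand names are illustrative only):

    #include <cassert>
    #include <cstdint>

    // cmix: pick each bit from rs1 where the rs2 mask bit is set,
    // from rs3 where it is clear.
    static uint64_t cmix(uint64_t rs1, uint64_t rs2, uint64_t rs3) {
        return (rs1 & rs2) | (rs3 & ~rs2);
    }

    int main() {
        uint64_t a = 0x0123456789ABCDEFull, b = 0x00FF00FF00FF00FFull, c = ~a;
        uint64_t r = cmix(a, b, c);
        for (int i = 0; i < 64; i++) {
            uint64_t bit = ((b >> i) & 1) ? (a >> i) & 1 : (c >> i) & 1;
            assert(((r >> i) & 1) == bit);
        }
    }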
-1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/cmpeq8.h b/vendor/riscv-isa-sim/riscv/insns/cmpeq8.h new file mode 100644 index 00000000..fba1bf6d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmpeq8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 == ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/cpop.h b/vendor/riscv-isa-sim/riscv/insns/cpop.h new file mode 100644 index 00000000..1f5c3ef8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cpop.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZBB); +reg_t x = 0; +for (int i = 0; i < xlen; i++) + if (1 & (RS1 >> i)) x++; +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cpopw.h b/vendor/riscv-isa-sim/riscv/insns/cpopw.h new file mode 100644 index 00000000..41383985 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cpopw.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_ZBB); +reg_t x = 0; +for (int i = 0; i < 32; i++) + if (1 & (RS1 >> i)) x++; +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cras16.h b/vendor/riscv-isa-sim/riscv/insns/cras16.h new file mode 100644 index 00000000..6717e099 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cras16.h @@ -0,0 +1,5 @@ +P_CROSS_LOOP(16, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/cras32.h b/vendor/riscv-isa-sim/riscv/insns/cras32.h new file mode 100644 index 00000000..8f53e98b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cras32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_LOOP(32, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_b.h b/vendor/riscv-isa-sim/riscv/insns/crc32_b.h new file mode 100644 index 00000000..3111fe57 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_b.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 8; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_d.h b/vendor/riscv-isa-sim/riscv/insns/crc32_d.h new file mode 100644 index 00000000..7fd7a38f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_d.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 64; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_h.h b/vendor/riscv-isa-sim/riscv/insns/crc32_h.h new file mode 100644 index 00000000..5063fefd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 16; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_w.h b/vendor/riscv-isa-sim/riscv/insns/crc32_w.h new file mode 100644 index 00000000..6e425ab8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_w.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 32; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32c_b.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_b.h new file mode 100644 index 00000000..d11b0dda --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_b.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 8; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git 
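The crc32_*/crc32c_* handlers here differ only in polynomial (0xEDB88320, bit-reflected CRC-32, vs 0x82F63B78, bit-reflected CRC-32C/Castagnoli) and in iteration count (8/16/32/64 for the .b/.h/.w/.d widths). Each iteration is the classic reflected-CRC step: shift right one bit and xor in the polynomial when the bit shifted out was 1; `~((x&1)-1)` builds the all-ones/all-zeros mask branch-free. For intuition, the same step assembled into an ordinary byte-wise CRC-32 (a sketch only; the instructions themselves shift in no message bits):

    #include <cstdint>
    #include <cstdio>

    // One reflected-CRC step, exactly as in the handlers above.
    static uint32_t crc_step(uint32_t x, uint32_t poly) {
        return (x >> 1) ^ (poly & ~((x & 1) - 1)); // branch-free conditional xor
    }

    // Ordinary byte-wise CRC-32 built from that step, for intuition only.
    static uint32_t crc32(const char* s) {
        uint32_t x = ~0u;
        for (; *s; s++) {
            x ^= (uint8_t)*s;
            for (int i = 0; i < 8; i++) x = crc_step(x, 0xEDB88320);
        }
        return ~x;
    }

    int main() { printf("%08x\n", crc32("123456789")); } // standard check value cbf43926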
a/vendor/riscv-isa-sim/riscv/insns/crc32c_d.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_d.h new file mode 100644 index 00000000..81175fd9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_d.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 64; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32c_h.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_h.h new file mode 100644 index 00000000..ef5817d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 16; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32c_w.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_w.h new file mode 100644 index 00000000..87935402 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_w.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 32; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crsa16.h b/vendor/riscv-isa-sim/riscv/insns/crsa16.h new file mode 100644 index 00000000..2c1997ac --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crsa16.h @@ -0,0 +1,5 @@ +P_CROSS_LOOP(16, { + pd = ps1 - ps2; +}, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/crsa32.h b/vendor/riscv-isa-sim/riscv/insns/crsa32.h new file mode 100644 index 00000000..4290e9ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_LOOP(32, { + pd = (int64_t)ps1 - ps2; +}, { + pd = (int64_t)ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrc.h b/vendor/riscv-isa-sim/riscv/insns/csrrc.h new file mode 100644 index 00000000..019a9ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrc.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old & ~RS1); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrci.h b/vendor/riscv-isa-sim/riscv/insns/csrrci.h new file mode 100644 index 00000000..f02d3268 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrci.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old & ~(reg_t)insn.rs1()); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrs.h b/vendor/riscv-isa-sim/riscv/insns/csrrs.h new file mode 100644 index 00000000..7632d1f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrs.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old | RS1); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrsi.h b/vendor/riscv-isa-sim/riscv/insns/csrrsi.h new file mode 100644 index 00000000..9acfcfcf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrsi.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old | insn.rs1()); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git 
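The csrr{s,c}{,i} handlers share one subtlety: the CSR is written only when rs1 (or the 5-bit immediate) is nonzero, and validate_csr is told up front whether a write will occur, so a pure read of a read-only CSR does not trap. A hedged model (toy_csr and csrrs_model are stand-ins invented here; they do not reproduce p->get_csr/p->put_csr):

    #include <cassert>
    #include <cstdint>

    using reg_t = uint64_t;
    struct toy_csr { reg_t value; };    // stand-in for the processor's CSR state

    // Model of the csrrs flow above; csrrc is identical with `old & ~bits`.
    static reg_t csrrs_model(toy_csr& csr, unsigned rs1_index, reg_t rs1_bits) {
        bool write = rs1_index != 0;     // rs1 == x0 means "read, don't write"
        reg_t old = csr.value;           // p->get_csr(csr, insn, write)
        if (write)
            csr.value = old | rs1_bits;  // p->put_csr(csr, old | RS1)
        return old;                      // old value is written to rd
    }

    int main() {
        toy_csr c{0x5};
        assert(csrrs_model(c, 0, 0xF0) == 0x5 && c.value == 0x5);  // pure read
        assert(csrrs_model(c, 7, 0xF0) == 0x5 && c.value == 0xF5); // set bits
    }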
a/vendor/riscv-isa-sim/riscv/insns/csrrw.h b/vendor/riscv-isa-sim/riscv/insns/csrrw.h new file mode 100644 index 00000000..e4c605bd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrw.h @@ -0,0 +1,5 @@ +int csr = validate_csr(insn.csr(), true); +reg_t old = p->get_csr(csr, insn, true); +p->put_csr(csr, RS1); +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrwi.h b/vendor/riscv-isa-sim/riscv/insns/csrrwi.h new file mode 100644 index 00000000..77fec154 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrwi.h @@ -0,0 +1,5 @@ +int csr = validate_csr(insn.csr(), true); +reg_t old = p->get_csr(csr, insn, true); +p->put_csr(csr, insn.rs1()); +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/ctz.h b/vendor/riscv-isa-sim/riscv/insns/ctz.h new file mode 100644 index 00000000..25d37239 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ctz.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZBB); +reg_t x = xlen; +for (int i = 0; i < xlen; i++) + if (1 & (RS1 >> i)) { x = i; break; } +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ctzw.h b/vendor/riscv-isa-sim/riscv/insns/ctzw.h new file mode 100644 index 00000000..aca46e9d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ctzw.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_ZBB); +reg_t x = 32; +for (int i = 0; i < 32; i++) + if (1 & (RS1 >> i)) { x = i; break; } +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/div.h b/vendor/riscv-isa-sim/riscv/insns/div.h new file mode 100644 index 00000000..9cbe8d6b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/div.h @@ -0,0 +1,9 @@ +require_extension('M'); +sreg_t lhs = sext_xlen(RS1); +sreg_t rhs = sext_xlen(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else if(lhs == INT64_MIN && rhs == -1) + WRITE_RD(lhs); +else + WRITE_RD(sext_xlen(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/divu.h b/vendor/riscv-isa-sim/riscv/insns/divu.h new file mode 100644 index 00000000..31d75856 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/divu.h @@ -0,0 +1,7 @@ +require_extension('M'); +reg_t lhs = zext_xlen(RS1); +reg_t rhs = zext_xlen(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else + WRITE_RD(sext_xlen(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/divuw.h b/vendor/riscv-isa-sim/riscv/insns/divuw.h new file mode 100644 index 00000000..e127619a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/divuw.h @@ -0,0 +1,8 @@ +require_extension('M'); +require_rv64; +reg_t lhs = zext32(RS1); +reg_t rhs = zext32(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else + WRITE_RD(sext32(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/divw.h b/vendor/riscv-isa-sim/riscv/insns/divw.h new file mode 100644 index 00000000..11be17e4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/divw.h @@ -0,0 +1,8 @@ +require_extension('M'); +require_rv64; +sreg_t lhs = sext32(RS1); +sreg_t rhs = sext32(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else + WRITE_RD(sext32(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/dret.h b/vendor/riscv-isa-sim/riscv/insns/dret.h new file mode 100644 index 00000000..01a39923 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/dret.h @@ -0,0 +1,9 @@ +require(STATE.debug_mode); +set_pc_and_serialize(STATE.dpc->read()); +p->set_privilege(STATE.dcsr->prv); + +/* We're not in Debug Mode anymore. 
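The div/divu/divw/divuw handlers never trap, per the M-extension rules the code above encodes: division by zero returns all ones (UINT64_MAX, i.e. -1 in every width once sign-extended), and the single signed-overflow case, most-negative dividend divided by -1, returns the dividend unchanged. A 64-bit model (rv_div is a made-up name):

    #include <cassert>
    #include <cstdint>

    // RISC-V M-extension signed division: no traps, two special cases.
    static int64_t rv_div(int64_t lhs, int64_t rhs) {
        if (rhs == 0) return -1;                       // all ones
        if (lhs == INT64_MIN && rhs == -1) return lhs; // overflow: dividend
        return lhs / rhs;
    }

    int main() {
        assert(rv_div(42, 0) == -1);
        assert(rv_div(INT64_MIN, -1) == INT64_MIN);
        assert(rv_div(-7, 2) == -3);  // C++ truncates toward zero, as required
    }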
*/ +STATE.debug_mode = false; + +if (STATE.dcsr->step) + STATE.single_step = STATE.STEP_STEPPING; diff --git a/vendor/riscv-isa-sim/riscv/insns/ebreak.h b/vendor/riscv-isa-sim/riscv/insns/ebreak.h new file mode 100644 index 00000000..9f3d44d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ebreak.h @@ -0,0 +1 @@ +throw trap_breakpoint(STATE.v, pc); diff --git a/vendor/riscv-isa-sim/riscv/insns/ecall.h b/vendor/riscv-isa-sim/riscv/insns/ecall.h new file mode 100644 index 00000000..e6c723f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ecall.h @@ -0,0 +1,11 @@ +switch (STATE.prv) +{ + case PRV_U: throw trap_user_ecall(); + case PRV_S: + if (STATE.v) + throw trap_virtual_supervisor_ecall(); + else + throw trap_supervisor_ecall(); + case PRV_M: throw trap_machine_ecall(); + default: abort(); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_d.h b/vendor/riscv-isa-sim/riscv/insns/fadd_d.h new file mode 100644 index 00000000..4a436e24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_add(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_h.h b/vendor/riscv-isa-sim/riscv/insns/fadd_h.h new file mode 100644 index 00000000..2b646ae7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_add(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_q.h b/vendor/riscv-isa-sim/riscv/insns/fadd_q.h new file mode 100644 index 00000000..1139a74d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_add(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_s.h b/vendor/riscv-isa-sim/riscv/insns/fadd_s.h new file mode 100644 index 00000000..cc18d58c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_add(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_d.h b/vendor/riscv-isa-sim/riscv/insns/fclass_d.h new file mode 100644 index 00000000..9456123d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_classify(f64(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_h.h b/vendor/riscv-isa-sim/riscv/insns/fclass_h.h new file mode 100644 index 00000000..066a2d24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_classify(f16(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_q.h b/vendor/riscv-isa-sim/riscv/insns/fclass_q.h new file mode 100644 index 00000000..53307582 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_classify(f128(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_s.h b/vendor/riscv-isa-sim/riscv/insns/fclass_s.h new file mode 100644 index 00000000..a392db88 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_classify(f32(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h 
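Every floating-point handler from here on follows the same five-line shape: gate on the extension, require_fp (mstatus.FS not Off), load the rounding mode into SoftFloat's global, run the SoftFloat operation, and fold the accumulated exception flags into fflags via set_fp_exceptions. Written out against the Berkeley SoftFloat-3 API the simulator vendors (a sketch: fadd_s_model is a made-up name, and the real handlers also mark mstatus.FS dirty):

    #include <cstdint>
    #include "softfloat.h"   // Berkeley SoftFloat-3, as vendored here

    uint32_t fadd_s_model(uint32_t a_bits, uint32_t b_bits, uint_fast8_t rm) {
        softfloat_roundingMode = rm;     // RM field, already validated
        softfloat_exceptionFlags = 0;
        float32_t a = { a_bits }, b = { b_bits };
        float32_t r = f32_add(a, b);
        // softfloat_exceptionFlags now holds NV/DZ/OF/UF/NX for fflags
        return r.v;
    }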
b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h new file mode 100644 index 00000000..04e9ff4e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_to_f64(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h new file mode 100644 index 00000000..08716cff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f64(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h new file mode 100644 index 00000000..306d7fed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui64_to_f64(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h new file mode 100644 index 00000000..b50a43d0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_to_f64(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h new file mode 100644 index 00000000..5f805b06 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_to_f64(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h new file mode 100644 index 00000000..4c4861c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f64((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h new file mode 100644 index 00000000..1dbf218a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f64((uint32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h new file mode 100644 index 00000000..e9987b7f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_to_f16(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h new file mode 100644 index 00000000..39178c2f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f16(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h new file mode 100644 index 00000000..a872c480 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; 
+WRITE_FRD(ui64_to_f16(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h new file mode 100644 index 00000000..4dfdd536 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_to_f16(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h new file mode 100644 index 00000000..ce39d814 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFHMIN); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_to_f16(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h new file mode 100644 index 00000000..c0824545 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f16((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h new file mode 100644 index 00000000..9f2f5f6a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f16((uint32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h new file mode 100644 index 00000000..c09e6c44 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f64_to_i64(f64(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h new file mode 100644 index 00000000..5a1fea85 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f16_to_i64(f16(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h new file mode 100644 index 00000000..b28bca23 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f128_to_i64(f128(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h new file mode 100644 index 00000000..267e0eb8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f32_to_i64(f32(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h new file mode 100644 index 00000000..3a021204 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f64_to_ui64(f64(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h new file mode 100644 index 
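In the fcvt_{l,lu}_* handlers here (and the fcvt_{w,wu}_* ones below), the trailing `true` passed to f*_to_i*/f*_to_ui* is SoftFloat's "exact" flag: it makes an inexact conversion raise NX. NaN and out-of-range inputs raise NV and return values already saturated the way the RISC-V spec requires, assuming the RISC-V-configured SoftFloat build the simulator vendors, so the handlers need no extra clamping. Hedged sketch of the call shape (fcvt_l_d_model is a made-up name):

    #include <cstdint>
    #include "softfloat.h"   // Berkeley SoftFloat-3

    int64_t fcvt_l_d_model(uint64_t fs1_bits, uint_fast8_t rm) {
        float64_t f = { fs1_bits };
        return f64_to_i64(f, rm, true);  // true: signal inexact (NX)
    }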
00000000..f1454c3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f16_to_ui64(f16(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h new file mode 100644 index 00000000..8c5be7c6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f128_to_ui64(f128(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h new file mode 100644 index 00000000..94115a3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f32_to_ui64(f32(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h new file mode 100644 index 00000000..c2437b12 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_to_f128(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h new file mode 100644 index 00000000..8bf16ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_to_f128(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h new file mode 100644 index 00000000..f1f45ca3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f128(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h new file mode 100644 index 00000000..850212e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui64_to_f128(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h new file mode 100644 index 00000000..79e6bb6f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_to_f128(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h new file mode 100644 index 00000000..fb83f15d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f128((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h new file mode 100644 index 00000000..7c2ae97e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f128((uint32_t)RS1)); +set_fp_exceptions; diff 
--git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h new file mode 100644 index 00000000..40333359 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_to_f32(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h new file mode 100644 index 00000000..22cdd728 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFHMIN); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_to_f32(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h new file mode 100644 index 00000000..9abcc805 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f32(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h new file mode 100644 index 00000000..70c676ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui64_to_f32(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h new file mode 100644 index 00000000..b0f118ec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_to_f32(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h new file mode 100644 index 00000000..1ddabd87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f32((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h new file mode 100644 index 00000000..c1394c3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f32((uint32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h new file mode 100644 index 00000000..28eb2456 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f64_to_i32(f64(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h new file mode 100644 index 00000000..fe8bb48f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f16_to_i32(f16(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h new file mode 100644 index 00000000..e10bafc9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; 
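All the *_w/_wu converters produce a 32-bit value but write an XLEN-wide rd, so each result is wrapped in sext32; note this applies to the unsigned fcvt.wu.* results as well, keeping rd in the canonical RV64 form where bit 31 is replicated upward. Sketch:

    #include <cassert>
    #include <cstdint>

    // RV64 keeps 32-bit results sign-extended in 64-bit registers; this
    // holds for fcvt.wu.* too, so 0x80000000 becomes 0xFFFFFFFF80000000.
    static int64_t sext32(uint32_t x) { return (int64_t)(int32_t)x; }

    int main() {
        assert((uint64_t)sext32(0x7FFFFFFFu) == 0x000000007FFFFFFFull);
        assert((uint64_t)sext32(0x80000000u) == 0xFFFFFFFF80000000ull);
    }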
+WRITE_RD(sext32(f128_to_i32(f128(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h new file mode 100644 index 00000000..d30f1b44 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f32_to_i32(f32(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h new file mode 100644 index 00000000..5cdc004c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f64_to_ui32(f64(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h new file mode 100644 index 00000000..bf6648d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f16_to_ui32(f16(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h new file mode 100644 index 00000000..c391dc87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f128_to_ui32(f128(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h new file mode 100644 index 00000000..034d6816 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f32_to_ui32(f32(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_d.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_d.h new file mode 100644 index 00000000..ae7911ae --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_div(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_h.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_h.h new file mode 100644 index 00000000..a169eae8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_div(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_q.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_q.h new file mode 100644 index 00000000..22048317 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_div(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_s.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_s.h new file mode 100644 index 00000000..c74ff041 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_div(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fence.h b/vendor/riscv-isa-sim/riscv/insns/fence.h new file mode 100644 index 00000000..e69de29b diff --git 
a/vendor/riscv-isa-sim/riscv/insns/fence_i.h b/vendor/riscv-isa-sim/riscv/insns/fence_i.h new file mode 100644 index 00000000..38dcaf3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fence_i.h @@ -0,0 +1 @@ +MMU.flush_icache(); diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_d.h b/vendor/riscv-isa-sim/riscv/insns/feq_d.h new file mode 100644 index 00000000..541ed5bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_eq(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_h.h b/vendor/riscv-isa-sim/riscv/insns/feq_h.h new file mode 100644 index 00000000..47e75a5b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_eq(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_q.h b/vendor/riscv-isa-sim/riscv/insns/feq_q.h new file mode 100644 index 00000000..cee2da95 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_q.h @@ -0,0 +1,4 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_eq(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_s.h b/vendor/riscv-isa-sim/riscv/insns/feq_s.h new file mode 100644 index 00000000..489bea69 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_s.h @@ -0,0 +1,4 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_eq(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fld.h b/vendor/riscv-isa-sim/riscv/insns/fld.h new file mode 100644 index 00000000..4dea1d47 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fld.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(f64(MMU.load_uint64(RS1 + insn.i_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_d.h b/vendor/riscv-isa-sim/riscv/insns/fle_d.h new file mode 100644 index 00000000..419a36fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_le(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_h.h b/vendor/riscv-isa-sim/riscv/insns/fle_h.h new file mode 100644 index 00000000..9fc59685 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_le(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_q.h b/vendor/riscv-isa-sim/riscv/insns/fle_q.h new file mode 100644 index 00000000..8368af9d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_q.h @@ -0,0 +1,4 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_le(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_s.h b/vendor/riscv-isa-sim/riscv/insns/fle_s.h new file mode 100644 index 00000000..5c0124ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_s.h @@ -0,0 +1,4 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_le(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flh.h b/vendor/riscv-isa-sim/riscv/insns/flh.h new file mode 100644 index 00000000..bdb22d3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flh.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +WRITE_FRD(f16(MMU.load_uint16(RS1 + insn.i_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/flq.h b/vendor/riscv-isa-sim/riscv/insns/flq.h new file mode 100644 index 
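Note the asymmetry among the compares: feq_* above uses SoftFloat's quiet equality (NaN operands simply give 0; only a signaling NaN raises NV), while the flt_*/fle_* handlers use the signaling predicates, which raise NV on any NaN operand, as the RISC-V spec requires. The result values look like this in native C++ (a sketch; C++ comparisons are quiet, so only the 0/1 results carry over, not the flag behaviour):

    #include <cassert>
    #include <cmath>

    int main() {
        float nan = std::nanf("");
        // feq-style (quiet): NaN compares unequal, with no error
        assert(!(nan == nan) && !(1.0f == nan));
        // flt/fle also yield false on NaN, but the signaling predicates
        // used in the handlers additionally raise NV on any NaN operand
        assert(!(nan < 1.0f) && !(nan <= 1.0f));
    }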
00000000..81d225cd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flq.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(MMU.load_float128(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_d.h b/vendor/riscv-isa-sim/riscv/insns/flt_d.h new file mode 100644 index 00000000..7176a961 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_lt(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_h.h b/vendor/riscv-isa-sim/riscv/insns/flt_h.h new file mode 100644 index 00000000..f516a38a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_lt(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_q.h b/vendor/riscv-isa-sim/riscv/insns/flt_q.h new file mode 100644 index 00000000..c4521418 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_q.h @@ -0,0 +1,4 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_lt(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_s.h b/vendor/riscv-isa-sim/riscv/insns/flt_s.h new file mode 100644 index 00000000..40acc34b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_s.h @@ -0,0 +1,4 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_lt(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flw.h b/vendor/riscv-isa-sim/riscv/insns/flw.h new file mode 100644 index 00000000..61297544 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flw.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(f32(MMU.load_uint32(RS1 + insn.i_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_d.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_d.h new file mode 100644 index 00000000..ab22bebb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_h.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_h.h new file mode 100644 index 00000000..6551de5e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_q.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_q.h new file mode 100644 index 00000000..882dfc1d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128(FRS1), f128(FRS2), f128(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_s.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_s.h new file mode 100644 index 00000000..e919190c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_d.h b/vendor/riscv-isa-sim/riscv/insns/fmax_d.h new file mode 100644 index 00000000..11491f54 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_d.h @@ -0,0 +1,9 @@ +require_extension('D'); +require_fp; +bool greater = 
f64_lt_quiet(f64(FRS2), f64(FRS1)) || + (f64_eq(f64(FRS2), f64(FRS1)) && (f64(FRS2).v & F64_SIGN)); +if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v)) + WRITE_FRD(f64(defaultNaNF64UI)); +else + WRITE_FRD(greater || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_h.h b/vendor/riscv-isa-sim/riscv/insns/fmax_h.h new file mode 100644 index 00000000..3d4c40eb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(f16_max(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_q.h b/vendor/riscv-isa-sim/riscv/insns/fmax_q.h new file mode 100644 index 00000000..7dd7884a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_q.h @@ -0,0 +1,9 @@ +require_extension('Q'); +require_fp; +bool greater = f128_lt_quiet(f128(FRS2), f128(FRS1)) || + (f128_eq(f128(FRS2), f128(FRS1)) && (f128(FRS2).v[1] & F64_SIGN)); +if (isNaNF128(f128(FRS1)) && isNaNF128(f128(FRS2))) + WRITE_FRD(f128(defaultNaNF128())); +else + WRITE_FRD(greater || isNaNF128(f128(FRS2)) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_s.h b/vendor/riscv-isa-sim/riscv/insns/fmax_s.h new file mode 100644 index 00000000..41d8f921 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_s.h @@ -0,0 +1,9 @@ +require_extension('F'); +require_fp; +bool greater = f32_lt_quiet(f32(FRS2), f32(FRS1)) || + (f32_eq(f32(FRS2), f32(FRS1)) && (f32(FRS2).v & F32_SIGN)); +if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v)) + WRITE_FRD(f32(defaultNaNF32UI)); +else + WRITE_FRD(greater || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_d.h b/vendor/riscv-isa-sim/riscv/insns/fmin_d.h new file mode 100644 index 00000000..5cf349d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_d.h @@ -0,0 +1,9 @@ +require_extension('D'); +require_fp; +bool less = f64_lt_quiet(f64(FRS1), f64(FRS2)) || + (f64_eq(f64(FRS1), f64(FRS2)) && (f64(FRS1).v & F64_SIGN)); +if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v)) + WRITE_FRD(f64(defaultNaNF64UI)); +else + WRITE_FRD(less || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_h.h b/vendor/riscv-isa-sim/riscv/insns/fmin_h.h new file mode 100644 index 00000000..5fb1404f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(f16_min(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_q.h b/vendor/riscv-isa-sim/riscv/insns/fmin_q.h new file mode 100644 index 00000000..fcb9526e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_q.h @@ -0,0 +1,9 @@ +require_extension('Q'); +require_fp; +bool less = f128_lt_quiet(f128(FRS1), f128(FRS2)) || + (f128_eq(f128(FRS1), f128(FRS2)) && (f128(FRS1).v[1] & F64_SIGN)); +if (isNaNF128(f128(FRS1)) && isNaNF128(f128(FRS2))) + WRITE_FRD(f128(defaultNaNF128())); +else + WRITE_FRD(less || isNaNF128(f128(FRS2)) ? 
FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_s.h b/vendor/riscv-isa-sim/riscv/insns/fmin_s.h new file mode 100644 index 00000000..19e11938 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_s.h @@ -0,0 +1,9 @@ +require_extension('F'); +require_fp; +bool less = f32_lt_quiet(f32(FRS1), f32(FRS2)) || + (f32_eq(f32(FRS1), f32(FRS2)) && (f32(FRS1).v & F32_SIGN)); +if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v)) + WRITE_FRD(f32(defaultNaNF32UI)); +else + WRITE_FRD(less || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_d.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_d.h new file mode 100644 index 00000000..5b5bc0f7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_h.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_h.h new file mode 100644 index 00000000..934291fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_q.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_q.h new file mode 100644 index 00000000..1bb96c27 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128(FRS1), f128(FRS2), f128_negate(f128(FRS3)))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_s.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_s.h new file mode 100644 index 00000000..d46c887e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_d.h b/vendor/riscv-isa-sim/riscv/insns/fmul_d.h new file mode 100644 index 00000000..9189d8d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mul(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_h.h b/vendor/riscv-isa-sim/riscv/insns/fmul_h.h new file mode 100644 index 00000000..0152df8f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mul(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_q.h b/vendor/riscv-isa-sim/riscv/insns/fmul_q.h new file mode 100644 index 00000000..66f5a05c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mul(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_s.h b/vendor/riscv-isa-sim/riscv/insns/fmul_s.h new file mode 100644 index 00000000..145d5ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mul(f32(FRS1), f32(FRS2))); 
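The fmin_*/fmax_* handlers above implement IEEE 754-2019 minimumNumber/maximumNumber: two NaN inputs yield the canonical NaN, one NaN loses to the other operand, and the explicit sign-bit test in `less`/`greater` makes -0.0 order below +0.0, which the comparison alone cannot see (the f16 variants get the same behaviour packaged as f16_min/f16_max). A sketch of the selection logic on native doubles (rv_fmin is a made-up name; Spike performs these tests on SoftFloat bit patterns):

    #include <cassert>
    #include <cmath>
    #include <limits>

    static double rv_fmin(double a, double b) {
        if (std::isnan(a) && std::isnan(b))
            return std::numeric_limits<double>::quiet_NaN(); // canonical NaN
        if (std::isnan(a)) return b;                         // NaN loses
        if (std::isnan(b)) return a;
        if (a == b) return std::signbit(a) ? a : b;          // -0.0 < +0.0
        return a < b ? a : b;
    }

    int main() {
        assert(std::signbit(rv_fmin(0.0, -0.0)));
        assert(rv_fmin(std::nan(""), 3.0) == 3.0);
    }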
+set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h b/vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h new file mode 100644 index 00000000..0bff5fb7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_rv64; +require_fp; +WRITE_FRD(f64(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h b/vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h new file mode 100644 index 00000000..e55d607b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +WRITE_FRD(f16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h b/vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h new file mode 100644 index 00000000..5f713231 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(f32(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h b/vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h new file mode 100644 index 00000000..e1a23f48 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_rv64; +require_fp; +WRITE_RD(FRS1.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h b/vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h new file mode 100644 index 00000000..7a2e5ff6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +WRITE_RD(sext32((int16_t)(FRS1.v[0]))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h b/vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h new file mode 100644 index 00000000..6754f869 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_RD(sext32(FRS1.v[0])); diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h new file mode 100644 index 00000000..e8dd7432 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h new file mode 100644 index 00000000..e4c619e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h new file mode 100644 index 00000000..a36ce188 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128_negate(f128(FRS1)), f128(FRS2), f128_negate(f128(FRS3)))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h new file mode 100644 index 00000000..1c2996e3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h new 
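fmsub_* above and the fnmadd_*/fnmsub_* variants that follow all reuse the single fused f*_mulAdd primitive and derive the sign variants by xoring the sign bit of the relevant operands (F64_SIGN and friends); only f128, stored as two 64-bit words, needs the f128_negate helper. A sign-bit xor negates without rounding and is well defined even for NaN and zero encodings, which is why it is safe to apply before the fused operation. Sketch (negate_bits is a made-up name):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Negation by sign-bit xor, as the handlers do on raw bit patterns.
    static double negate_bits(double x) {
        uint64_t v; std::memcpy(&v, &x, 8);
        v ^= 1ull << 63;                 // F64_SIGN
        double r; std::memcpy(&r, &v, 8);
        return r;
    }

    int main() {
        assert(negate_bits(1.5) == -1.5);
        assert(std::signbit(negate_bits(0.0)));  // -0.0, not +0.0
        // fmsub a,b,c == fma(a, b, -c), with a single rounding:
        assert(std::fma(2.0, 3.0, negate_bits(1.0)) == 5.0);
    }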
file mode 100644 index 00000000..c29a0b93 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h new file mode 100644 index 00000000..0410c3bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h new file mode 100644 index 00000000..130b4ce3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128_negate(f128(FRS1)), f128(FRS2), f128(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h new file mode 100644 index 00000000..4c61fc7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsd.h b/vendor/riscv-isa-sim/riscv/insns/fsd.h new file mode 100644 index 00000000..38c702b7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsd.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +MMU.store_uint64(RS1 + insn.s_imm(), FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h new file mode 100644 index 00000000..78f9ce78 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(fsgnj64(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h new file mode 100644 index 00000000..79d50f5f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(fsgnj16(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h new file mode 100644 index 00000000..0b9a2708 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(fsgnj128(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h new file mode 100644 index 00000000..c1a70cb7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(fsgnj32(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h new file mode 100644 index 00000000..f02c3116 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(fsgnj64(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h new file mode 100644 index 00000000..ebb4ac9f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h @@ -0,0 +1,3 
@@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(fsgnj16(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h new file mode 100644 index 00000000..38c7bbff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(fsgnj128(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h new file mode 100644 index 00000000..35906d65 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(fsgnj32(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h new file mode 100644 index 00000000..c1217371 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(fsgnj64(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h new file mode 100644 index 00000000..93102695 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(fsgnj16(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h new file mode 100644 index 00000000..fc86d26d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(fsgnj128(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h new file mode 100644 index 00000000..4d5c624b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(fsgnj32(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsh.h b/vendor/riscv-isa-sim/riscv/insns/fsh.h new file mode 100644 index 00000000..9eaae1eb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsh.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +MMU.store_uint16(RS1 + insn.s_imm(), FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsl.h b/vendor/riscv-isa-sim/riscv/insns/fsl.h new file mode 100644 index 00000000..53a21608 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsl.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBT); +int shamt = RS2 & (2*xlen-1); +reg_t a = RS1, b = RS3; +if (shamt >= xlen) { + a = RS3, b = RS1; + shamt -= xlen; +} +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen(shamt ? (a << shamt) | (zext_xlen(b) >> rshamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fslw.h b/vendor/riscv-isa-sim/riscv/insns/fslw.h new file mode 100644 index 00000000..83940105 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fslw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBT); +int shamt = RS2 & 63; +reg_t a = RS1, b = RS3; +if (shamt >= 32) { + a = RS3, b = RS1; + shamt -= 32; +} +int rshamt = -shamt & 31; +WRITE_RD(sext32(shamt ? 
(a << shamt) | (zext32(b) >> rshamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsq.h b/vendor/riscv-isa-sim/riscv/insns/fsq.h new file mode 100644 index 00000000..610960e5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsq.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +MMU.store_float128(RS1 + insn.s_imm(), FRS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h new file mode 100644 index 00000000..da138ba1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_sqrt(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h new file mode 100644 index 00000000..138d5727 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_sqrt(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h new file mode 100644 index 00000000..6cb6ba31 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_sqrt(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h new file mode 100644 index 00000000..74768466 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_sqrt(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsr.h b/vendor/riscv-isa-sim/riscv/insns/fsr.h new file mode 100644 index 00000000..dfb26f11 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsr.h @@ -0,0 +1,9 @@ +require_either_extension(xlen == 32 ? EXT_ZBPBO : EXT_XZBT, EXT_XZBT); +int shamt = RS2 & (2*xlen-1); +reg_t a = RS1, b = RS3; +if (shamt >= xlen) { + a = RS3, b = RS1; + shamt -= xlen; +} +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen(shamt ? (b << rshamt) | (zext_xlen(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsri.h b/vendor/riscv-isa-sim/riscv/insns/fsri.h new file mode 100644 index 00000000..f7186f1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsri.h @@ -0,0 +1,9 @@ +require_either_extension(xlen == 32 ? EXT_ZBPBO : EXT_XZBT, EXT_XZBT); +int shamt = SHAMT & (2*xlen-1); +reg_t a = RS1, b = RS3; +if (shamt >= xlen) { + a = RS3, b = RS1; + shamt -= xlen; +} +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen(shamt ? (b << rshamt) | (zext_xlen(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsriw.h b/vendor/riscv-isa-sim/riscv/insns/fsriw.h new file mode 100644 index 00000000..7956de7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsriw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBT); +int shamt = SHAMT & 63; +reg_t a = RS1, b = RS3; +if (shamt >= 32) { + a = RS3, b = RS1; + shamt -= 32; +} +int rshamt = -shamt & 31; +WRITE_RD(sext32(shamt ? 
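// Illustrative model of the fsl/fslw/fsr/fsri funnel shifts; not part of the
// vendored sources. rs1 and rs3 are treated as one 2*xlen-bit value and an
// xlen-bit window is shifted out. Reducing the amount mod 2*xlen and swapping
// the operands once it reaches xlen, as the handlers above do, keeps every
// C++ shift strictly below the operand width (larger shifts are undefined).
#include <stdint.h>
static uint64_t fsl64_model(uint64_t a, uint64_t b, unsigned shamt) {
  shamt &= 127;                                     // shift amount mod 2*xlen
  if (shamt >= 64) { uint64_t t = a; a = b; b = t; shamt -= 64; }
  return shamt ? (a << shamt) | (b >> (64 - shamt)) : a;
}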
(b << rshamt) | (zext32(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsrw.h b/vendor/riscv-isa-sim/riscv/insns/fsrw.h new file mode 100644 index 00000000..494fe260 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsrw.h @@ -0,0 +1,10 @@ +require_rv64; +require_either_extension(EXT_ZBPBO, EXT_XZBT); +int shamt = RS2 & 63; +reg_t a = RS1, b = RS3; +if (shamt >= 32) { + a = RS3, b = RS1; + shamt -= 32; +} +int rshamt = -shamt & 31; +WRITE_RD(sext32(shamt ? (b << rshamt) | (zext32(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_d.h b/vendor/riscv-isa-sim/riscv/insns/fsub_d.h new file mode 100644 index 00000000..1418a063 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_sub(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_h.h b/vendor/riscv-isa-sim/riscv/insns/fsub_h.h new file mode 100644 index 00000000..43b51cc2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_sub(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_q.h b/vendor/riscv-isa-sim/riscv/insns/fsub_q.h new file mode 100644 index 00000000..e050e3aa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_sub(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_s.h b/vendor/riscv-isa-sim/riscv/insns/fsub_s.h new file mode 100644 index 00000000..f6183ea0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_sub(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsw.h b/vendor/riscv-isa-sim/riscv/insns/fsw.h new file mode 100644 index 00000000..8af51845 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsw.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +MMU.store_uint32(RS1 + insn.s_imm(), FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorc.h b/vendor/riscv-isa-sim/riscv/insns/gorc.h new file mode 100644 index 00000000..ffe44134 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorc.h @@ -0,0 +1,10 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & (xlen-1); +if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x |= ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorci.h b/vendor/riscv-isa-sim/riscv/insns/gorci.h new file mode 100644 index 00000000..d3017f49 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorci.h @@ -0,0 +1,13 @@ +// Zbb contains orc.b but not general gorci +require(((SHAMT == 7) && p->extension_enabled(EXT_ZBB)) + || p->extension_enabled(EXT_XZBP)); +require(SHAMT < xlen); +reg_t x = RS1; +int shamt = SHAMT; +if 
(shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x |= ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorciw.h b/vendor/riscv-isa-sim/riscv/insns/gorciw.h new file mode 100644 index 00000000..44ade807 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorciw.h @@ -0,0 +1,11 @@ +require_rv64; +require_extension(EXT_XZBP); +require(SHAMT < 32); +reg_t x = RS1; +int shamt = SHAMT; +if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorcw.h b/vendor/riscv-isa-sim/riscv/insns/gorcw.h new file mode 100644 index 00000000..611b3caa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorcw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 31; +if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/grev.h b/vendor/riscv-isa-sim/riscv/insns/grev.h new file mode 100644 index 00000000..7181b3cd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/grev.h @@ -0,0 +1,10 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & (xlen-1); +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/grevi.h b/vendor/riscv-isa-sim/riscv/insns/grevi.h new file mode 100644 index 00000000..d4718145 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/grevi.h @@ -0,0 +1,17 @@ +// Zbb contains rev8 but not general grevi +// Zbkb contains rev8 and brev8 (a.k.a. 
rev.b) but not general grevi +int shamt = SHAMT; +require(((shamt == xlen - 8) && (p->extension_enabled(EXT_ZBB) || p->extension_enabled(EXT_ZBKB))) //rev8 + || ((shamt == 7) && p->extension_enabled(EXT_ZBKB)) // rev8.b + || ((shamt == 8) && p->extension_enabled(EXT_ZPN)) // rev8.h + || ((shamt == xlen - 1) && p->extension_enabled(EXT_ZPN)) // rev + || p->extension_enabled(EXT_XZBP)); +require(shamt < xlen); +reg_t x = RS1; +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/greviw.h b/vendor/riscv-isa-sim/riscv/insns/greviw.h new file mode 100644 index 00000000..004ecf34 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/greviw.h @@ -0,0 +1,11 @@ +require_rv64; +require_extension(EXT_XZBP); +require(SHAMT < 32); +reg_t x = RS1; +int shamt = SHAMT; +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/grevw.h b/vendor/riscv-isa-sim/riscv/insns/grevw.h new file mode 100644 index 00000000..3fbcf228 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/grevw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 31; +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h b/vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h new file mode 100644 index 00000000..b3ddf1e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.mstatus->read(), MSTATUS_TVM) ? 
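// Illustrative loop form of the unrolled grev/gorc stages above; not part of
// the vendored sources. Both walk the same log2(xlen)-stage butterfly
// network, one stage per set bit of the shift amount: grev swaps the two
// halves of every 2^i-bit group (shamt == xlen-1 reverses all bits,
// shamt == xlen-8 is rev8), while gorc ORs the halves together
// (shamt == 7 is orc.b); replacing the '=' below with '|=' gives gorc.
#include <stdint.h>
static uint64_t grev64_model(uint64_t x, unsigned k) {
  static const uint64_t m[6] = {
    0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
    0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
  };
  for (unsigned i = 0; i < 6; i++) {
    unsigned s = 1u << i;
    if (k & s) x = ((x & m[i]) << s) | ((x >> s) & m[i]);
  }
  return x;
}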
PRV_M : PRV_S); +MMU.flush_tlb(); diff --git a/vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h b/vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h new file mode 100644 index 00000000..ecd42c19 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(PRV_S); +MMU.flush_tlb(); diff --git a/vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h b/vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h new file mode 100644 index 00000000..6be5cd94 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h @@ -0,0 +1,2 @@ +require_extension(EXT_SVINVAL); +#include "hfence_gvma.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h b/vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h new file mode 100644 index 00000000..c50707c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h @@ -0,0 +1,2 @@ +require_extension(EXT_SVINVAL); +#include "hfence_vvma.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_b.h b/vendor/riscv-isa-sim/riscv/insns/hlv_b.h new file mode 100644 index 00000000..2ccb0463 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_b.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int8(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_bu.h b/vendor/riscv-isa-sim/riscv/insns/hlv_bu.h new file mode 100644 index 00000000..560f94af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_bu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_uint8(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_d.h b/vendor/riscv-isa-sim/riscv/insns/hlv_d.h new file mode 100644 index 00000000..f432b650 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_d.h @@ -0,0 +1,5 @@ +require_extension('H'); +require_rv64; +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int64(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_h.h b/vendor/riscv-isa-sim/riscv/insns/hlv_h.h new file mode 100644 index 00000000..4cb07e99 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_h.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_hu.h b/vendor/riscv-isa-sim/riscv/insns/hlv_hu.h new file mode 100644 index 00000000..adec2f0b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_hu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_uint16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_w.h b/vendor/riscv-isa-sim/riscv/insns/hlv_w.h new file mode 100644 index 00000000..b2e102f0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_w.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? 
PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int32(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_wu.h b/vendor/riscv-isa-sim/riscv/insns/hlv_wu.h new file mode 100644 index 00000000..1f921c0f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_wu.h @@ -0,0 +1,5 @@ +require_extension('H'); +require_rv64; +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_uint32(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h b/vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h new file mode 100644 index 00000000..3eb699c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_x_uint16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h b/vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h new file mode 100644 index 00000000..33e2fa1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(sext_xlen(MMU.guest_load_x_uint32(RS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_b.h b/vendor/riscv-isa-sim/riscv/insns/hsv_b.h new file mode 100644 index 00000000..15f6a268 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_b.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +MMU.guest_store_uint8(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_d.h b/vendor/riscv-isa-sim/riscv/insns/hsv_d.h new file mode 100644 index 00000000..83c3376e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_d.h @@ -0,0 +1,5 @@ +require_extension('H'); +require_rv64; +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +MMU.guest_store_uint64(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_h.h b/vendor/riscv-isa-sim/riscv/insns/hsv_h.h new file mode 100644 index 00000000..eaa2a2cb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_h.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +MMU.guest_store_uint16(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_w.h b/vendor/riscv-isa-sim/riscv/insns/hsv_w.h new file mode 100644 index 00000000..0d2c3d4d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_w.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? 
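// Illustrative summary of the gate repeated by every HLV/HLVX/HSV handler;
// not part of the vendored sources (the helper and its flat encoding are
// ours). The hypervisor loads/stores need the H extension, must not execute
// with virtualization on (require_novirt), and are legal from U-mode only
// when hstatus.HU is set -- otherwise S-mode or higher is required.
static int hlsv_allowed(int h_ext, int virt, int prv, int hstatus_hu) {
  // prv: 0 = PRV_U, 1 = PRV_S, 3 = PRV_M
  if (!h_ext || virt) return 0;
  int needed = hstatus_hu ? 0 : 1;   // PRV_U if hstatus.HU else PRV_S
  return prv >= needed;
}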
PRV_U : PRV_S); +MMU.guest_store_uint32(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/insb.h b/vendor/riscv-isa-sim/riscv/insns/insb.h new file mode 100644 index 00000000..020e9051 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/insb.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +reg_t bpos = insn.p_imm3(); +require(bpos < (unsigned long)xlen/8); // imm[2] == 1 is illegal on rv32 +WRITE_RD(sext_xlen(set_field(RD, make_mask64(bpos * 8, 8), P_B(RS1, 0)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/jal.h b/vendor/riscv-isa-sim/riscv/insns/jal.h new file mode 100644 index 00000000..cd599641 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/jal.h @@ -0,0 +1,3 @@ +reg_t tmp = npc; +set_pc(JUMP_TARGET); +WRITE_RD(tmp); diff --git a/vendor/riscv-isa-sim/riscv/insns/jalr.h b/vendor/riscv-isa-sim/riscv/insns/jalr.h new file mode 100644 index 00000000..386e8db1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/jalr.h @@ -0,0 +1,3 @@ +reg_t tmp = npc; +set_pc((RS1 + insn.i_imm()) & ~reg_t(1)); +WRITE_RD(tmp); diff --git a/vendor/riscv-isa-sim/riscv/insns/kabs16.h b/vendor/riscv-isa-sim/riscv/insns/kabs16.h new file mode 100644 index 00000000..8d1d9b83 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabs16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_ONE_LOOP(16, { + pd = ps1; + if (ps1 == INT16_MIN) { + pd = INT16_MAX; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = - ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kabs32.h b/vendor/riscv-isa-sim/riscv/insns/kabs32.h new file mode 100644 index 00000000..0536aaca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabs32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_ONE_LOOP(32, { + pd = ps1; + if (ps1 == INT32_MIN) { + pd = INT32_MAX; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = - ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kabs8.h b/vendor/riscv-isa-sim/riscv/insns/kabs8.h new file mode 100644 index 00000000..2e6e1f16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabs8.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_ONE_LOOP(8, { + pd = ps1; + if (ps1 == INT8_MIN) { + pd = INT8_MAX; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = - ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kabsw.h b/vendor/riscv-isa-sim/riscv/insns/kabsw.h new file mode 100644 index 00000000..5e83b759 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabsw.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_extension(EXT_ZPN); +int32_t rs1 = P_W(RS1, 0); + +if (rs1 == INT32_MIN) { + rs1 = INT32_MAX; + P_SET_OV(1); +} + +WRITE_RD(sext_xlen(rs1 >= 0 ? 
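// Illustrative per-lane model of the kabs8/16/32/kabsw handlers; not part of
// the vendored sources. Saturating absolute value: |INT_MIN| is not
// representable in two's complement, so that one input clamps to INT_MAX and
// raises the overflow flag (P_SET_OV); every other input is the plain |x|.
#include <stdint.h>
static int32_t kabs32_model(int32_t v, int *ov) {
  if (v == INT32_MIN) { *ov = 1; return INT32_MAX; }  // saturate and flag
  return v < 0 ? -v : v;
}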
rs1 : -rs1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd16.h b/vendor/riscv-isa-sim/riscv/insns/kadd16.h new file mode 100644 index 00000000..b6defe1f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_LOOP(16, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd32.h b/vendor/riscv-isa-sim/riscv/insns/kadd32.h new file mode 100644 index 00000000..1728847a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd64.h b/vendor/riscv-isa-sim/riscv/insns/kadd64.h new file mode 100644 index 00000000..c58fff09 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_PROFILE({ + bool sat = false; + rd = (sat_add(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd8.h b/vendor/riscv-isa-sim/riscv/insns/kadd8.h new file mode 100644 index 00000000..98864c70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_LOOP(8, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kaddh.h b/vendor/riscv-isa-sim/riscv/insns/kaddh.h new file mode 100644 index 00000000..43aedb2d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kaddh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SH(RS1, 0) + (sreg_t)P_SH(RS2, 0); +P_SAT(res, 16); +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kaddw.h b/vendor/riscv-isa-sim/riscv/insns/kaddw.h new file mode 100644 index 00000000..3298d57e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kaddw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SW(RS1, 0) + (sreg_t)P_SW(RS2, 0); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kcras16.h b/vendor/riscv-isa-sim/riscv/insns/kcras16.h new file mode 100644 index 00000000..d7464253 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kcras16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kcras32.h b/vendor/riscv-isa-sim/riscv/insns/kcras32.h new file mode 100644 index 00000000..ead31f8a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kcras32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/kcrsa16.h new file mode 100644 index 00000000..2a7ca4d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kcrsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/kcrsa32.h new file mode 100644 index 00000000..b688fd3c --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/kcrsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabb.h b/vendor/riscv-isa-sim/riscv/insns/kdmabb.h new file mode 100644 index 00000000..7ca05639 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabb.h @@ -0,0 +1,17 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 0); + +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} + +res += sext32(RD); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabb16.h b/vendor/riscv-isa-sim/riscv/insns/kdmabb16.h new file mode 100644 index 00000000..2ccd40b1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabb16.h @@ -0,0 +1,18 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 0); + int32_t mres; + bool sat; + + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + mres = aop * bop; + mres <<= 1; + } else { + mres = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabt.h b/vendor/riscv-isa-sim/riscv/insns/kdmabt.h new file mode 100644 index 00000000..d50a6dfa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabt.h @@ -0,0 +1,17 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 1); + +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} + +res += sext32(RD); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmabt16.h new file mode 100644 index 00000000..49538b38 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabt16.h @@ -0,0 +1,18 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 1); + int32_t mres; + bool sat; + + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + mres = aop * bop; + mres <<= 1; + } else { + mres = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmatt.h b/vendor/riscv-isa-sim/riscv/insns/kdmatt.h new file mode 100644 index 00000000..e917d414 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmatt.h @@ -0,0 +1,17 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 1); +sreg_t bop = P_SH(RS2, 1); + +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} + +res += sext32(RD); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmatt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmatt16.h new file mode 100644 index 00000000..ebce13f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmatt16.h @@ -0,0 +1,18 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 1); + int32_t bop = P_SH(ps2, 1); + int32_t mres; + bool sat; + + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + mres = aop * bop; + mres <<= 1; + } else { + mres = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git 
a/vendor/riscv-isa-sim/riscv/insns/kdmbb.h b/vendor/riscv-isa-sim/riscv/insns/kdmbb.h new file mode 100644 index 00000000..2f7a3f95 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbb.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 0); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmbb16.h b/vendor/riscv-isa-sim/riscv/insns/kdmbb16.h new file mode 100644 index 00000000..a84877d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbb16.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 0); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd <<= 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmbt.h b/vendor/riscv-isa-sim/riscv/insns/kdmbt.h new file mode 100644 index 00000000..7f093e3a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmbt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmbt16.h new file mode 100644 index 00000000..85e9d0e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbt16.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd <<= 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmtt.h b/vendor/riscv-isa-sim/riscv/insns/kdmtt.h new file mode 100644 index 00000000..05a4c8c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmtt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 1); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmtt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmtt16.h new file mode 100644 index 00000000..2190710a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmtt16.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 1); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd <<= 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khm16.h b/vendor/riscv-isa-sim/riscv/insns/khm16.h new file mode 100644 index 00000000..9c2e28c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khm16.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_LOOP(16, { + if ((ps1 != INT16_MIN) | (ps2 != INT16_MIN)) { + pd = (ps1 * ps2) >> 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khm8.h b/vendor/riscv-isa-sim/riscv/insns/khm8.h new file mode 100644 index 00000000..ac21d68c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khm8.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_LOOP(8, { + if ((ps1 != INT8_MIN) | (ps2 != INT8_MIN)) { + pd = (ps1 * 
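// Illustrative models of the kdm* / khm* Q15 multiplies; not part of the
// vendored sources. Both form a 16x16 signed product: kdm doubles it into
// Q31 ((a*b) << 1), khm renormalizes it back to Q15 ((a*b) >> 15). The only
// input pair that overflows either form is a == b == INT16_MIN
// (0x8000 * 0x8000), which saturates to the type maximum and sets OV --
// exactly the single special case the handlers test for.
#include <stdint.h>
static int32_t kdm_model(int16_t a, int16_t b, int *ov) {   // Q15*Q15 -> Q31
  if (a == INT16_MIN && b == INT16_MIN) { *ov = 1; return INT32_MAX; }
  return ((int32_t)a * b) << 1;
}
static int16_t khm_model(int16_t a, int16_t b, int *ov) {   // Q15*Q15 -> Q15
  if (a == INT16_MIN && b == INT16_MIN) { *ov = 1; return INT16_MAX; }
  return (int16_t)(((int32_t)a * b) >> 15);
}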
ps2) >> 7; + } else { + pd = INT8_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbb.h b/vendor/riscv-isa-sim/riscv/insns/khmbb.h new file mode 100644 index 00000000..e08eddca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbb.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 0); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res >>= 15; +} else { + res = INT16_MAX; + P_SET_OV(1); +} +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbb16.h b/vendor/riscv-isa-sim/riscv/insns/khmbb16.h new file mode 100644 index 00000000..efbd7eb6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbb16.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 0); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd >>= 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } + pd = (int16_t)pd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbt.h b/vendor/riscv-isa-sim/riscv/insns/khmbt.h new file mode 100644 index 00000000..0c19cd16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res >>= 15; +} else { + res = INT16_MAX; + P_SET_OV(1); +} +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbt16.h b/vendor/riscv-isa-sim/riscv/insns/khmbt16.h new file mode 100644 index 00000000..4bb1f48c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbt16.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd >>= 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } + pd = (int16_t)pd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmtt.h b/vendor/riscv-isa-sim/riscv/insns/khmtt.h new file mode 100644 index 00000000..dcd45030 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmtt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 1); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res >>= 15; +} else { + res = INT16_MAX; + P_SET_OV(1); +} +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/khmtt16.h b/vendor/riscv-isa-sim/riscv/insns/khmtt16.h new file mode 100644 index 00000000..d3c0b4cf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmtt16.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 1); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd >>= 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } + pd = (int16_t)pd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmx16.h b/vendor/riscv-isa-sim/riscv/insns/khmx16.h new file mode 100644 index 00000000..bf934627 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmx16.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_CROSS_LOOP(16, { + if ((ps1 != INT16_MIN) | (ps2 != INT16_MIN)) { + pd = (ps1 * ps2) >> 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } +},) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmx8.h b/vendor/riscv-isa-sim/riscv/insns/khmx8.h new file mode 100644 index 
00000000..0d6a5d5a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmx8.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_CROSS_LOOP(8, { + if ((ps1 != INT8_MIN) | (ps2 != INT8_MIN)) { + pd = (ps1 * ps2) >> 7; + } else { + pd = INT8_MAX; + P_SET_OV(1); + } +},) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabb.h b/vendor/riscv-isa-sim/riscv/insns/kmabb.h new file mode 100644 index 00000000..f2d7715e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabb.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int32_t mres = P_SH(ps1, 0) * P_SH(ps2, 0); + bool sat = false; + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabb32.h b/vendor/riscv-isa-sim/riscv/insns/kmabb32.h new file mode 100644 index 00000000..752bf8b5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabb32.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat = false; +sreg_t mres = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +WRITE_RD((sat_add(RD, mres, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabt.h b/vendor/riscv-isa-sim/riscv/insns/kmabt.h new file mode 100644 index 00000000..4ead23bc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabt.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int32_t mres = P_SH(ps1, 0) * P_SH(ps2, 1); + bool sat = false; + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabt32.h b/vendor/riscv-isa-sim/riscv/insns/kmabt32.h new file mode 100644 index 00000000..ee7511bd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabt32.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat = false; +sreg_t mres = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +WRITE_RD((sat_add(RD, mres, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmada.h b/vendor/riscv-isa-sim/riscv/insns/kmada.h new file mode 100644 index 00000000..3c082c7b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmada.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmadrs.h b/vendor/riscv-isa-sim/riscv/insns/kmadrs.h new file mode 100644 index 00000000..a4503517 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmadrs.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + if (j & 1) + pd_res -= ps1 * ps2; + else + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmadrs32.h b/vendor/riscv-isa-sim/riscv/insns/kmadrs32.h new file mode 100644 index 00000000..0f71e90f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmadrs32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(RD, mres0, -mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmads.h b/vendor/riscv-isa-sim/riscv/insns/kmads.h new file mode 100644 index 00000000..89aabe05 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmads.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmads32.h b/vendor/riscv-isa-sim/riscv/insns/kmads32.h new file mode 100644 index 00000000..0a3b5905 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmads32.h @@ -0,0 +1,10 @@ 
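// Illustrative widened stand-in for the three-operand saturating add used by
// the km*32 forms here; not part of the vendored sources (the simulator's
// real sat_add overload is defined elsewhere in its headers). Folding both
// products into one call matters: only the final sum may saturate, so an
// intermediate overflow that the other addend cancels must not clamp the
// result. Computing the sum exactly (GCC/Clang __int128) makes that direct:
#include <stdint.h>
static int64_t sat_add3_model(int64_t acc, int64_t a, int64_t b, int *sat) {
  __int128 s = (__int128)acc + a + b;            // exact, cannot overflow
  if (s > INT64_MAX) { *sat = 1; return INT64_MAX; }
  if (s < INT64_MIN) { *sat = 1; return INT64_MIN; }
  return (int64_t)s;
}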
+require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(RD, -mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmar64.h b/vendor/riscv-isa-sim/riscv/insns/kmar64.h new file mode 100644 index 00000000..49f44823 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmar64.h @@ -0,0 +1,16 @@ +require_vector_vs; +P_64_PROFILE_BASE() +P_64_PROFILE_PARAM(true, false) + +bool sat = false; +sreg_t mres0 = (sreg_t)P_SW(rs1, 0) * P_SW(rs2, 0); +sreg_t mres1 = (sreg_t)P_SW(rs1, 1) * P_SW(rs2, 1); +sreg_t res; + +if (xlen == 32) { + rd = (sat_add(rd, mres0, sat)); +} else { + rd = (sat_add(rd, mres0, mres1, sat)); +} +P_SET_OV(sat); +P_64_PROFILE_END() diff --git a/vendor/riscv-isa-sim/riscv/insns/kmatt.h b/vendor/riscv-isa-sim/riscv/insns/kmatt.h new file mode 100644 index 00000000..4be2f3d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmatt.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int32_t mres = P_SH(ps1, 1) * P_SH(ps2, 1); + bool sat = false; + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmatt32.h b/vendor/riscv-isa-sim/riscv/insns/kmatt32.h new file mode 100644 index 00000000..4fe9ed2b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmatt32.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat = false; +sreg_t mres = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); +WRITE_RD((sat_add(RD, mres, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxda.h b/vendor/riscv-isa-sim/riscv/insns/kmaxda.h new file mode 100644 index 00000000..393f0472 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, true, true, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxda32.h b/vendor/riscv-isa-sim/riscv/insns/kmaxda32.h new file mode 100644 index 00000000..b9346b96 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(RD, mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxds.h b/vendor/riscv-isa-sim/riscv/insns/kmaxds.h new file mode 100644 index 00000000..c2f0e591 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxds.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, true, true, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxds32.h b/vendor/riscv-isa-sim/riscv/insns/kmaxds32.h new file mode 100644 index 00000000..6a7d64e4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxds32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(RD, -mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmda.h b/vendor/riscv-isa-sim/riscv/insns/kmda.h new file mode 100644 index 00000000..68b6c9a7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, false, true, { + pd_res += ps1 * ps2; +}) 
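// Illustrative one-lane model of kmda/kmxda; not part of the vendored
// sources. Both sum the two 16x16 products of the operands' halfwords; the
// "x" form crosses the halves of rs2. With no accumulator input, the only
// sum that can overflow 32 bits is 0x8000*0x8000 + 0x8000*0x8000, which the
// saturating reduction clamps to INT32_MAX (setting OV).
#include <stdint.h>
static int32_t kmda_model(uint32_t a, uint32_t b, int crossed, int *sat) {
  int32_t a0 = (int16_t)a, a1 = (int16_t)(a >> 16);
  int32_t b0 = (int16_t)b, b1 = (int16_t)(b >> 16);
  int64_t s = crossed ? (int64_t)a1 * b0 + (int64_t)a0 * b1   // kmxda
                      : (int64_t)a1 * b1 + (int64_t)a0 * b0;  // kmda
  if (s > INT32_MAX) { *sat = 1; s = INT32_MAX; }
  return (int32_t)s;
}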
diff --git a/vendor/riscv-isa-sim/riscv/insns/kmda32.h b/vendor/riscv-isa-sim/riscv/insns/kmda32.h new file mode 100644 index 00000000..646021f1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmac.h b/vendor/riscv-isa-sim/riscv/insns/kmmac.h new file mode 100644 index 00000000..946f0fe5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmac.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + bool sat = false; + pd = (sat_add(pd, (mres >> 32), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmac_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmac_u.h new file mode 100644 index 00000000..5a06a4db --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmac_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + int32_t round = (((mres >> 31) + 1) >> 1); + bool sat = false; + pd = (sat_add(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb.h new file mode 100644 index 00000000..0e3a6944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 0); + bool sat = false; + pd = (sat_add(pd, (mres >> 16), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb2.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb2.h new file mode 100644 index 00000000..6b3aa0dd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb2.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + addop = mres >> 16; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h new file mode 100644 index 00000000..f44346e1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + addop = ((mres >> 15) + 1) >> 1; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h new file mode 100644 index 00000000..766dd716 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 0); + int32_t round = (((mres >> 15) + 1) >> 1); + bool sat = false; + pd = (sat_add(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt.h new file mode 100644 index 00000000..514ee484 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 1); + bool sat = false; + pd = 
(sat_add(pd, (mres >> 16), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt2.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt2.h new file mode 100644 index 00000000..3cd72de7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt2.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + addop = mres >> 16; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h new file mode 100644 index 00000000..7fe378c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + addop = ((mres >> 15) + 1) >> 1; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h new file mode 100644 index 00000000..74d8fd01 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 1); + int32_t round = (((mres >> 15) + 1) >> 1); + bool sat = false; + pd = (sat_add(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmsb.h b/vendor/riscv-isa-sim/riscv/insns/kmmsb.h new file mode 100644 index 00000000..29ad1bfa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmsb.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + bool sat = false; + pd = (sat_sub(pd, (mres >> 32), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h new file mode 100644 index 00000000..c7b283ea --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + int32_t round = (((mres >> 31) + 1) >> 1); + bool sat = false; + pd = (sat_sub(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwb2.h b/vendor/riscv-isa-sim/riscv/insns/kmmwb2.h new file mode 100644 index 00000000..272f7380 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwb2.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + pd = mres >> 16; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h new file mode 100644 index 00000000..b5a5006c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + pd = ((mres >> 15) + 1) >> 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwt2.h b/vendor/riscv-isa-sim/riscv/insns/kmmwt2.h new file mode 100644 index 00000000..73d3dc8c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwt2.h @@ -0,0 +1,10 @@ 
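// Illustrative model of the rounding used by the kmm*_u forms; not part of
// the vendored sources. The kmm* family keeps the most significant word of a
// widened product: the plain forms truncate (mres >> 32, or >> 16 when one
// operand is a halfword), while the _u forms round to nearest by stopping
// one bit short, adding one, and shifting once more: ((mres >> (n-1)) + 1) >> 1.
#include <stdint.h>
static int32_t mulh_round_model(int32_t a, int32_t b) {
  int64_t p = (int64_t)a * b;
  return (int32_t)(((p >> 31) + 1) >> 1);   // rounded p >> 32
}
// e.g. a = b = 46341: p = 2147488281, so truncation (p >> 32) gives 0 while
// the discarded fraction is just over one half and the rounded form gives 1.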
+require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + pd = mres >> 16; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h new file mode 100644 index 00000000..1f525a8a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + pd = ((mres >> 15) + 1) >> 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsda.h b/vendor/riscv-isa-sim/riscv/insns/kmsda.h new file mode 100644 index 00000000..94b118a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsda32.h b/vendor/riscv-isa-sim/riscv/insns/kmsda32.h new file mode 100644 index 00000000..d54d42c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(RD, -mres0, -mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsr64.h b/vendor/riscv-isa-sim/riscv/insns/kmsr64.h new file mode 100644 index 00000000..bfef5033 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsr64.h @@ -0,0 +1,26 @@ +require_vector_vs; +P_64_PROFILE_BASE() +P_64_PROFILE_PARAM(true, false) + +bool sat = false; +sreg_t mres0 = -(sreg_t)P_SW(rs1, 0) * P_SW(rs2, 0); +sreg_t mres1 = -(sreg_t)P_SW(rs1, 1) * P_SW(rs2, 1); +sreg_t res; + +if (xlen == 32) { + rd = (sat_add(rd, mres0, sat)); +} else { + if ((rd ^ mres0) < 0) { + res = rd + mres0; + rd = (sat_add(res, mres1, sat)); + } else if ((rd ^ mres1) < 0) { + res = rd + mres1; + rd = (sat_add(res, mres0, sat)); + } else { + rd = (sat_add(rd, mres0, sat)); + P_SET_OV(sat); + rd = (sat_add(rd, mres1, sat)); + } +} +P_SET_OV(sat); +P_64_PROFILE_END() diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsxda.h b/vendor/riscv-isa-sim/riscv/insns/kmsxda.h new file mode 100644 index 00000000..2d0faa36 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsxda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, true, true, { + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsxda32.h b/vendor/riscv-isa-sim/riscv/insns/kmsxda32.h new file mode 100644 index 00000000..3006b542 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsxda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(RD, -mres0, -mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmxda.h b/vendor/riscv-isa-sim/riscv/insns/kmxda.h new file mode 100644 index 00000000..4addd8a5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmxda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, false, true, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmxda32.h b/vendor/riscv-isa-sim/riscv/insns/kmxda32.h new file mode 100644 index 00000000..99a8204e --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/kmxda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/ksll16.h b/vendor/riscv-isa-sim/riscv/insns/ksll16.h new file mode 100644 index 00000000..9e03b347 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksll16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_X_LOOP(16, 4, { + auto res = (sreg_t)ps1 << sa; + P_SAT(res, 16); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksll32.h b/vendor/riscv-isa-sim/riscv/insns/ksll32.h new file mode 100644 index 00000000..35888986 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksll32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_X_LOOP(32, 5, { + auto res = (sreg_t)ps1 << sa; + P_SAT(res, 32); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksll8.h b/vendor/riscv-isa-sim/riscv/insns/ksll8.h new file mode 100644 index 00000000..930ea03b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksll8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_X_LOOP(8, 3, { + auto res = (sreg_t)ps1 << sa; + P_SAT(res, 8); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslli16.h b/vendor/riscv-isa-sim/riscv/insns/kslli16.h new file mode 100644 index 00000000..edc7c671 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslli16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_I_LOOP(16, 4, { + auto res = (sreg_t)ps1 << imm4u; + P_SAT(res, 16); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslli32.h b/vendor/riscv-isa-sim/riscv/insns/kslli32.h new file mode 100644 index 00000000..4fd506b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslli32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_I_LOOP(32, 5, { + auto res = (sreg_t)ps1 << imm5u; + P_SAT(res, 32); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslli8.h b/vendor/riscv-isa-sim/riscv/insns/kslli8.h new file mode 100644 index 00000000..18d714f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslli8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_I_LOOP(8, 3, { + auto res = (sreg_t)ps1 << imm3u; + P_SAT(res, 8); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslliw.h b/vendor/riscv-isa-sim/riscv/insns/kslliw.h new file mode 100644 index 00000000..8902d3a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslliw.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t rs1 = sext32(RS1); +sreg_t sa = insn.p_imm5(); +sreg_t res = rs1 << sa; + +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ksllw.h b/vendor/riscv-isa-sim/riscv/insns/ksllw.h new file mode 100644 index 00000000..7e8452f8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksllw.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t rs1 = sext32(RS1); +sreg_t sa = get_field(RS2, make_mask64(0, 5)); +sreg_t res = rs1 << sa; + +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra16.h b/vendor/riscv-isa-sim/riscv/insns/kslra16.h new file mode 100644 index 00000000..ad1443a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra16.h @@ -0,0 +1,12 @@ +require_vector_vs; +P_X_LOOP(16, 5, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 16) ? 
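// Illustrative one-lane model of the kslra16 handler above; not part of the
// vendored sources. The shift amount is a signed field: negative values
// shift right arithmetically (the out-of-range amount -16 is clamped to 15,
// which is the (sa == 16) ? 15 : sa step), and non-negative values shift
// left with Q15 saturation, flagging OV on clamp.
#include <stdint.h>
static int16_t kslra16_model(int16_t v, int sa, int *ov) {  // sa in [-16, 15]
  if (sa < 0) {
    int n = (-sa > 15) ? 15 : -sa;
    return (int16_t)(v >> n);          // arithmetic shift right
  }
  int32_t r = (int32_t)v << sa;
  if (r > INT16_MAX) { *ov = 1; return INT16_MAX; }
  if (r < INT16_MIN) { *ov = 1; return INT16_MIN; }
  return (int16_t)r;
}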
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra16.h b/vendor/riscv-isa-sim/riscv/insns/kslra16.h
new file mode 100644
index 00000000..ad1443a2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslra16.h
@@ -0,0 +1,12 @@
+require_vector_vs;
+P_X_LOOP(16, 5, {
+  if (ssa < 0) {
+    sa = -ssa;
+    sa = (sa == 16) ? 15 : sa;
+    pd = ps1 >> sa;
+  } else {
+    auto res = (sreg_t)ps1 << ssa;
+    P_SAT(res, 16);
+    pd = res;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra16_u.h b/vendor/riscv-isa-sim/riscv/insns/kslra16_u.h
new file mode 100644
index 00000000..8335f3e8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslra16_u.h
@@ -0,0 +1,15 @@
+require_vector_vs;
+P_X_LOOP(16, 5, {
+  if (ssa < 0) {
+    sa = -ssa;
+    sa = (sa == 16) ? 15 : sa;
+    if(sa != 0)
+      pd = ((ps1 >> (sa - 1)) + 1) >> 1;
+    else
+      pd = ps1;
+  } else {
+    auto res = (sreg_t)ps1 << ssa;
+    P_SAT(res, 16);
+    pd = res;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra32.h b/vendor/riscv-isa-sim/riscv/insns/kslra32.h
new file mode 100644
index 00000000..871d6011
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslra32.h
@@ -0,0 +1,13 @@
+require_vector_vs;
+require_rv64;
+P_X_LOOP(32, 6, {
+  if (ssa < 0) {
+    sa = -ssa;
+    sa = (sa == 32) ? 31 : sa;
+    pd = ps1 >> sa;
+  } else {
+    auto res = (sreg_t)ps1 << ssa;
+    P_SAT(res, 32);
+    pd = res;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra32_u.h b/vendor/riscv-isa-sim/riscv/insns/kslra32_u.h
new file mode 100644
index 00000000..d53c8fe1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslra32_u.h
@@ -0,0 +1,16 @@
+require_vector_vs;
+require_rv64;
+P_X_LOOP(32, 6, {
+  if (ssa < 0) {
+    sa = -ssa;
+    sa = (sa == 32) ? 31 : sa;
+    if(sa != 0)
+      pd = ((ps1 >> (sa - 1)) + 1) >> 1;
+    else
+      pd = ps1;
+  } else {
+    auto res = (sreg_t)ps1 << ssa;
+    P_SAT(res, 32);
+    pd = res;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra8.h b/vendor/riscv-isa-sim/riscv/insns/kslra8.h
new file mode 100644
index 00000000..b3f3e6b8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslra8.h
@@ -0,0 +1,12 @@
+require_vector_vs;
+P_X_LOOP(8, 4, {
+  if (ssa < 0) {
+    sa = -ssa;
+    sa = (sa == 8) ? 7 : sa;
+    pd = ps1 >> sa;
+  } else {
+    auto res = (sreg_t)ps1 << ssa;
+    P_SAT(res, 8);
+    pd = res;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra8_u.h b/vendor/riscv-isa-sim/riscv/insns/kslra8_u.h
new file mode 100644
index 00000000..620f3bd3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslra8_u.h
@@ -0,0 +1,15 @@
+require_vector_vs;
+P_X_LOOP(8, 4, {
+  if (ssa < 0) {
+    sa = -ssa;
+    sa = (sa == 8) ? 7 : sa;
+    if(sa != 0)
+      pd = ((ps1 >> (sa - 1)) + 1) >> 1;
+    else
+      pd = ps1;
+  } else {
+    auto res = (sreg_t)ps1 << ssa;
+    P_SAT(res, 8);
+    pd = res;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslraw.h b/vendor/riscv-isa-sim/riscv/insns/kslraw.h
new file mode 100644
index 00000000..fa4c8443
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslraw.h
@@ -0,0 +1,14 @@
+require_vector_vs;
+require_extension(EXT_ZPN);
+sreg_t rs1 = sext32(RS1);
+sreg_t sa = int64_t(RS2) << (64 - 6) >> (64 - 6);
+
+if (sa < 0) {
+  sa = -sa;
+  sa = (sa == 32) ? 31 : sa;
+  WRITE_RD(sext32(rs1 >> sa));
+} else {
+  auto res = rs1 << sa;
+  P_SAT(res, 32);
+  WRITE_RD(sext32(res));
+}
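
The "_u" variants above round instead of truncating: for a negative shift
amount they compute ((x >> (sa - 1)) + 1) >> 1, i.e. an arithmetic right
shift with a half added in at the last bit position. A small sketch (assumes
arithmetic >> on negative values, as the simulator's sreg_t provides):

#include <stdint.h>

static int32_t sra_round(int32_t x, int sa)   /* 1 <= sa <= 31 */
{
  return ((x >> (sa - 1)) + 1) >> 1;
}
/* sra_round(5, 1) == 3 and sra_round(-5, 1) == -2: round-half-up, not truncation. */
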
diff --git a/vendor/riscv-isa-sim/riscv/insns/kslraw_u.h b/vendor/riscv-isa-sim/riscv/insns/kslraw_u.h
new file mode 100644
index 00000000..ebecb615
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kslraw_u.h
@@ -0,0 +1,14 @@
+require_vector_vs;
+require_extension(EXT_ZPN);
+sreg_t rs1 = sext32(RS1);
+sreg_t sa = int64_t(RS2) << (64 - 6) >> (64 - 6);
+
+if (sa < 0) {
+  sa = -sa;
+  sa = (sa == 32) ? 31 : sa;
+  WRITE_RD(sext32(((rs1 >> (sa - 1)) + 1) >> 1));
+} else {
+  auto res = rs1 << sa;
+  P_SAT(res, 32);
+  WRITE_RD(sext32(res));
+}
diff --git a/vendor/riscv-isa-sim/riscv/insns/kstas16.h b/vendor/riscv-isa-sim/riscv/insns/kstas16.h
new file mode 100644
index 00000000..ad180131
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kstas16.h
@@ -0,0 +1,10 @@
+require_vector_vs;
+P_STRAIGHT_ULOOP(16, {
+  bool sat = false;
+  pd = (sat_add(ps1, ps2, sat));
+  P_SET_OV(sat);
+}, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kstas32.h b/vendor/riscv-isa-sim/riscv/insns/kstas32.h
new file mode 100644
index 00000000..35f23e03
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kstas32.h
@@ -0,0 +1,11 @@
+require_vector_vs;
+require_rv64;
+P_STRAIGHT_ULOOP(32, {
+  bool sat = false;
+  pd = (sat_add(ps1, ps2, sat));
+  P_SET_OV(sat);
+}, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kstsa16.h b/vendor/riscv-isa-sim/riscv/insns/kstsa16.h
new file mode 100644
index 00000000..47a8918b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kstsa16.h
@@ -0,0 +1,10 @@
+require_vector_vs;
+P_STRAIGHT_ULOOP(16, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+}, {
+  bool sat = false;
+  pd = (sat_add(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kstsa32.h b/vendor/riscv-isa-sim/riscv/insns/kstsa32.h
new file mode 100644
index 00000000..aa9c372f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kstsa32.h
@@ -0,0 +1,11 @@
+require_vector_vs;
+require_rv64;
+P_STRAIGHT_ULOOP(32, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+}, {
+  bool sat = false;
+  pd = (sat_add(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub16.h b/vendor/riscv-isa-sim/riscv/insns/ksub16.h
new file mode 100644
index 00000000..57562b5d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ksub16.h
@@ -0,0 +1,6 @@
+require_vector_vs;
+P_LOOP(16, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub32.h b/vendor/riscv-isa-sim/riscv/insns/ksub32.h
new file mode 100644
index 00000000..3ef32e87
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ksub32.h
@@ -0,0 +1,7 @@
+require_vector_vs;
+require_rv64;
+P_LOOP(32, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub64.h b/vendor/riscv-isa-sim/riscv/insns/ksub64.h
new file mode 100644
index 00000000..c6f09948
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ksub64.h
@@ -0,0 +1,6 @@
+require_vector_vs;
+P_64_PROFILE({
+  bool sat = false;
+  rd = (sat_sub(rs1, rs2, sat));
+  P_SET_OV(sat);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub8.h b/vendor/riscv-isa-sim/riscv/insns/ksub8.h
new file mode 100644
index 00000000..705f6329
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ksub8.h
@@ -0,0 +1,6 @@
+require_vector_vs;
+P_LOOP(8, {
+  bool sat = false;
+  pd = (sat_sub(ps1, ps2, sat));
+  P_SET_OV(sat);
+})
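
The kstas/kstsa/ksub families above all reduce to per-lane sat_add/sat_sub
calls. The same saturating subtraction for a single 16-bit lane, written out
in plain C (sat_sub16 is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

static int16_t sat_sub16(int16_t a, int16_t b, bool *sat)
{
  int32_t r = (int32_t)a - b;               /* exact in 32 bits */
  if (r > INT16_MAX) { *sat = true; return INT16_MAX; }
  if (r < INT16_MIN) { *sat = true; return INT16_MIN; }
  return (int16_t)r;
}
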
diff --git a/vendor/riscv-isa-sim/riscv/insns/ksubh.h b/vendor/riscv-isa-sim/riscv/insns/ksubh.h
new file mode 100644
index 00000000..2455c161
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ksubh.h
@@ -0,0 +1,5 @@
+require_vector_vs;
+require_extension(EXT_ZPN);
+sreg_t res = (sreg_t)P_SH(RS1, 0) - (sreg_t)P_SH(RS2, 0);
+P_SAT(res, 16);
+WRITE_RD(sext_xlen((int16_t)res));
diff --git a/vendor/riscv-isa-sim/riscv/insns/ksubw.h b/vendor/riscv-isa-sim/riscv/insns/ksubw.h
new file mode 100644
index 00000000..3a3d7806
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ksubw.h
@@ -0,0 +1,5 @@
+require_vector_vs;
+require_extension(EXT_ZPN);
+sreg_t res = (sreg_t)P_SW(RS1, 0) - (sreg_t)P_SW(RS2, 0);
+P_SAT(res, 32);
+WRITE_RD(sext32(res));
diff --git a/vendor/riscv-isa-sim/riscv/insns/kwmmul.h b/vendor/riscv-isa-sim/riscv/insns/kwmmul.h
new file mode 100644
index 00000000..b0ab8d4d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kwmmul.h
@@ -0,0 +1,10 @@
+require_vector_vs;
+P_LOOP(32, {
+  if((INT32_MIN != ps1) | (INT32_MIN != ps2)) {
+    int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1;
+    pd = mres >> 32;
+  } else {
+    pd = INT32_MAX;
+    P_SET_OV(1);
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h b/vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h
new file mode 100644
index 00000000..c2045e19
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h
@@ -0,0 +1,10 @@
+require_vector_vs;
+P_LOOP(32, {
+  if((INT32_MIN != ps1) | (INT32_MIN != ps2)) {
+    int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1;
+    pd = ((mres >> 31) + 1) >> 1;
+  } else {
+    pd = INT32_MAX;
+    P_SET_OV(1);
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/lb.h b/vendor/riscv-isa-sim/riscv/insns/lb.h
new file mode 100644
index 00000000..0f0999ca
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lb.h
@@ -0,0 +1 @@
+WRITE_RD(MMU.load_int8(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/lbu.h b/vendor/riscv-isa-sim/riscv/insns/lbu.h
new file mode 100644
index 00000000..64d4a688
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lbu.h
@@ -0,0 +1 @@
+WRITE_RD(MMU.load_uint8(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/ld.h b/vendor/riscv-isa-sim/riscv/insns/ld.h
new file mode 100644
index 00000000..1122b980
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ld.h
@@ -0,0 +1,2 @@
+require_rv64;
+WRITE_RD(MMU.load_int64(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/lh.h b/vendor/riscv-isa-sim/riscv/insns/lh.h
new file mode 100644
index 00000000..0d458e0e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lh.h
@@ -0,0 +1 @@
+WRITE_RD(MMU.load_int16(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/lhu.h b/vendor/riscv-isa-sim/riscv/insns/lhu.h
new file mode 100644
index 00000000..9d240702
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lhu.h
@@ -0,0 +1 @@
+WRITE_RD(MMU.load_uint16(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/lr_d.h b/vendor/riscv-isa-sim/riscv/insns/lr_d.h
new file mode 100644
index 00000000..6dd8d672
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lr_d.h
@@ -0,0 +1,5 @@
+require_extension('A');
+require_rv64;
+auto res = MMU.load_int64(RS1, true);
+MMU.acquire_load_reservation(RS1);
+WRITE_RD(res);
diff --git a/vendor/riscv-isa-sim/riscv/insns/lr_w.h b/vendor/riscv-isa-sim/riscv/insns/lr_w.h
new file mode 100644
index 00000000..185be53b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lr_w.h
@@ -0,0 +1,4 @@
+require_extension('A');
+auto res = MMU.load_int32(RS1, true);
+MMU.acquire_load_reservation(RS1);
+WRITE_RD(res);
diff --git a/vendor/riscv-isa-sim/riscv/insns/lui.h b/vendor/riscv-isa-sim/riscv/insns/lui.h
new file mode 100644
index 00000000..c7b5264e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lui.h
@@ -0,0 +1 @@
+WRITE_RD(insn.u_imm());
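
kwmmul above is the Q31 fractional multiply: double the 64-bit product and
keep the high word. The only overflowing input pair is INT32_MIN * INT32_MIN
(+1.0 is not representable in Q31), which is why the guard tests both
operands against INT32_MIN. The same arithmetic as a standalone function
(q31_mul is an illustrative name; the OV flag is elided):

#include <stdint.h>

static int32_t q31_mul(int32_t a, int32_t b)
{
  if (a == INT32_MIN && b == INT32_MIN)
    return INT32_MAX;                          /* saturate; kwmmul also sets OV */
  return (int32_t)((((int64_t)a * b) << 1) >> 32);
}
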
diff --git a/vendor/riscv-isa-sim/riscv/insns/lw.h b/vendor/riscv-isa-sim/riscv/insns/lw.h
new file mode 100644
index 00000000..4e8ed040
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lw.h
@@ -0,0 +1 @@
+WRITE_RD(MMU.load_int32(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/lwu.h b/vendor/riscv-isa-sim/riscv/insns/lwu.h
new file mode 100644
index 00000000..dcc4d75b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/lwu.h
@@ -0,0 +1,2 @@
+require_rv64;
+WRITE_RD(MMU.load_uint32(RS1 + insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/maddr32.h b/vendor/riscv-isa-sim/riscv/insns/maddr32.h
new file mode 100644
index 00000000..943aeac9
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/maddr32.h
@@ -0,0 +1,5 @@
+require_extension(EXT_ZPN);
+reg_t mres = (reg_t)P_W(RS1, 0) * P_W(RS2, 0);
+reg_t rd = P_W(RD, 0);
+rd += mres;
+WRITE_RD(sext_xlen((int32_t)rd));
diff --git a/vendor/riscv-isa-sim/riscv/insns/max.h b/vendor/riscv-isa-sim/riscv/insns/max.h
new file mode 100644
index 00000000..073b8df2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/max.h
@@ -0,0 +1,2 @@
+require_either_extension(EXT_ZBPBO, EXT_ZBB);
+WRITE_RD(sext_xlen(sreg_t(RS1) > sreg_t(RS2) ? RS1 : RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/maxu.h b/vendor/riscv-isa-sim/riscv/insns/maxu.h
new file mode 100644
index 00000000..05af4925
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/maxu.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBB);
+WRITE_RD(sext_xlen(RS1 > RS2 ? RS1 : RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/min.h b/vendor/riscv-isa-sim/riscv/insns/min.h
new file mode 100644
index 00000000..47bc993c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/min.h
@@ -0,0 +1,2 @@
+require_either_extension(EXT_ZBPBO, EXT_ZBB);
+WRITE_RD(sext_xlen(sreg_t(RS1) < sreg_t(RS2) ? RS1 : RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/minu.h b/vendor/riscv-isa-sim/riscv/insns/minu.h
new file mode 100644
index 00000000..7582c0d1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/minu.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBB);
+WRITE_RD(sext_xlen(RS1 < RS2 ? RS1 : RS2));
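
maddr32 above accumulates a 32x32 product into the low 32 bits of rd with
plain wrap-around (no saturation), then sign-extends the 32-bit result to
xlen. One step of it in freestanding C (maddr32_step is an illustrative name):

#include <stdint.h>

static int32_t maddr32_step(uint32_t rd, uint32_t rs1, uint32_t rs2)
{
  return (int32_t)(rd + rs1 * rs2);   /* multiply-accumulate modulo 2^32 */
}
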
diff --git a/vendor/riscv-isa-sim/riscv/insns/mret.h b/vendor/riscv-isa-sim/riscv/insns/mret.h
new file mode 100644
index 00000000..5198b8fc
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mret.h
@@ -0,0 +1,14 @@
+require_privilege(PRV_M);
+set_pc_and_serialize(p->get_state()->mepc->read());
+reg_t s = STATE.mstatus->read();
+reg_t prev_prv = get_field(s, MSTATUS_MPP);
+reg_t prev_virt = get_field(s, MSTATUS_MPV);
+if (prev_prv != PRV_M)
+  s = set_field(s, MSTATUS_MPRV, 0);
+s = set_field(s, MSTATUS_MIE, get_field(s, MSTATUS_MPIE));
+s = set_field(s, MSTATUS_MPIE, 1);
+s = set_field(s, MSTATUS_MPP, p->extension_enabled('U') ? PRV_U : PRV_M);
+s = set_field(s, MSTATUS_MPV, 0);
+p->put_csr(CSR_MSTATUS, s);
+p->set_privilege(prev_prv);
+p->set_virt(prev_virt);
diff --git a/vendor/riscv-isa-sim/riscv/insns/msubr32.h b/vendor/riscv-isa-sim/riscv/insns/msubr32.h
new file mode 100644
index 00000000..2086bd19
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/msubr32.h
@@ -0,0 +1,5 @@
+require_extension(EXT_ZPN);
+reg_t mres = (reg_t)P_W(RS1, 0) * P_W(RS2, 0);
+reg_t rd = P_W(RD, 0);
+rd -= mres;
+WRITE_RD(sext_xlen((int32_t)rd));
diff --git a/vendor/riscv-isa-sim/riscv/insns/mul.h b/vendor/riscv-isa-sim/riscv/insns/mul.h
new file mode 100644
index 00000000..52d00225
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mul.h
@@ -0,0 +1,2 @@
+require_either_extension('M', EXT_ZMMUL);
+WRITE_RD(sext_xlen(RS1 * RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/mulh.h b/vendor/riscv-isa-sim/riscv/insns/mulh.h
new file mode 100644
index 00000000..a8f67d12
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mulh.h
@@ -0,0 +1,5 @@
+require_either_extension('M', EXT_ZMMUL);
+if (xlen == 64)
+  WRITE_RD(mulh(RS1, RS2));
+else
+  WRITE_RD(sext32((sext32(RS1) * sext32(RS2)) >> 32));
diff --git a/vendor/riscv-isa-sim/riscv/insns/mulhsu.h b/vendor/riscv-isa-sim/riscv/insns/mulhsu.h
new file mode 100644
index 00000000..cb5caa4e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mulhsu.h
@@ -0,0 +1,5 @@
+require_either_extension('M', EXT_ZMMUL);
+if (xlen == 64)
+  WRITE_RD(mulhsu(RS1, RS2));
+else
+  WRITE_RD(sext32((sext32(RS1) * reg_t((uint32_t)RS2)) >> 32));
diff --git a/vendor/riscv-isa-sim/riscv/insns/mulhu.h b/vendor/riscv-isa-sim/riscv/insns/mulhu.h
new file mode 100644
index 00000000..9ce751e3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mulhu.h
@@ -0,0 +1,5 @@
+require_either_extension('M', EXT_ZMMUL);
+if (xlen == 64)
+  WRITE_RD(mulhu(RS1, RS2));
+else
+  WRITE_RD(sext32(((uint64_t)(uint32_t)RS1 * (uint64_t)(uint32_t)RS2) >> 32));
diff --git a/vendor/riscv-isa-sim/riscv/insns/mulr64.h b/vendor/riscv-isa-sim/riscv/insns/mulr64.h
new file mode 100644
index 00000000..4e2aad75
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mulr64.h
@@ -0,0 +1,3 @@
+require_extension(EXT_ZPSFOPERAND);
+reg_t rd = (reg_t)P_W(RS1, 0) * P_W(RS2, 0);
+P_64_PROFILE_END();
diff --git a/vendor/riscv-isa-sim/riscv/insns/mulsr64.h b/vendor/riscv-isa-sim/riscv/insns/mulsr64.h
new file mode 100644
index 00000000..a2a51156
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mulsr64.h
@@ -0,0 +1,3 @@
+require_extension(EXT_ZPSFOPERAND);
+sreg_t rd = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0);
+P_64_PROFILE_END();
diff --git a/vendor/riscv-isa-sim/riscv/insns/mulw.h b/vendor/riscv-isa-sim/riscv/insns/mulw.h
new file mode 100644
index 00000000..20108d84
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/mulw.h
@@ -0,0 +1,3 @@
+require_either_extension('M', EXT_ZMMUL);
+require_rv64;
+WRITE_RD(sext32(RS1 * RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/or.h b/vendor/riscv-isa-sim/riscv/insns/or.h
new file mode 100644
index 00000000..3f2fffc2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/or.h
@@ -0,0 +1 @@
+WRITE_RD(RS1 | RS2);
diff --git a/vendor/riscv-isa-sim/riscv/insns/ori.h b/vendor/riscv-isa-sim/riscv/insns/ori.h
new file mode 100644
index 00000000..3aba1cb2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ori.h
@@ -0,0 +1,2 @@
+// prefetch.i/r/w hint when rd = 0 and i_imm[4:0] = 0/1/3
+WRITE_RD(insn.i_imm() | RS1);
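
On RV64 the mulh* handlers above defer to wide-multiply helpers; on RV32 they
widen to 64 bits and keep the upper word, which is the same trick in portable C:

#include <stdint.h>

static int32_t mulh32(int32_t a, int32_t b)     /* signed x signed, high word */
{
  return (int32_t)(((int64_t)a * b) >> 32);
}

static uint32_t mulhu32(uint32_t a, uint32_t b) /* unsigned x unsigned */
{
  return (uint32_t)(((uint64_t)a * b) >> 32);
}
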
diff --git a/vendor/riscv-isa-sim/riscv/insns/orn.h b/vendor/riscv-isa-sim/riscv/insns/orn.h
new file mode 100644
index 00000000..c1c9fd4d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/orn.h
@@ -0,0 +1,2 @@
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+WRITE_RD(RS1 | ~RS2);
diff --git a/vendor/riscv-isa-sim/riscv/insns/pack.h b/vendor/riscv-isa-sim/riscv/insns/pack.h
new file mode 100644
index 00000000..2140f918
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pack.h
@@ -0,0 +1,11 @@
+// RV32Zbb contains zext.h but not general pack
+require(((xlen == 32) && (insn.rs2() == 0) && p->extension_enabled(EXT_ZBB))
+        || p->extension_enabled(EXT_ZPN)
+        || p->extension_enabled(EXT_ZBKB)
+        || p->extension_enabled(EXT_XZBP)
+        || p->extension_enabled(EXT_XZBE)
+        || p->extension_enabled(EXT_XZBF)
+        || ((xlen == 64) && p->extension_enabled(EXT_XZBM)));
+reg_t lo = zext_xlen(RS1 << (xlen/2)) >> (xlen/2);
+reg_t hi = zext_xlen(RS2 << (xlen/2));
+WRITE_RD(sext_xlen(lo | hi));
diff --git a/vendor/riscv-isa-sim/riscv/insns/packh.h b/vendor/riscv-isa-sim/riscv/insns/packh.h
new file mode 100644
index 00000000..82886e32
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/packh.h
@@ -0,0 +1,7 @@
+require(p->extension_enabled(EXT_ZBKB) ||
+        p->extension_enabled(EXT_XZBP) ||
+        p->extension_enabled(EXT_XZBE) ||
+        p->extension_enabled(EXT_XZBF));
+reg_t lo = zext_xlen(RS1 << (xlen-8)) >> (xlen-8);
+reg_t hi = zext_xlen(RS2 << (xlen-8)) >> (xlen-16);
+WRITE_RD(sext_xlen(lo | hi));
diff --git a/vendor/riscv-isa-sim/riscv/insns/packu.h b/vendor/riscv-isa-sim/riscv/insns/packu.h
new file mode 100644
index 00000000..441207c3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/packu.h
@@ -0,0 +1,6 @@
+require(p->extension_enabled(EXT_ZPN) ||
+        p->extension_enabled(EXT_XZBP) ||
+        ((xlen == 64) && p->extension_enabled(EXT_XZBM)));
+reg_t lo = zext_xlen(RS1) >> (xlen/2);
+reg_t hi = zext_xlen(RS2) >> (xlen/2) << (xlen/2);
+WRITE_RD(sext_xlen(lo | hi));
diff --git a/vendor/riscv-isa-sim/riscv/insns/packuw.h b/vendor/riscv-isa-sim/riscv/insns/packuw.h
new file mode 100644
index 00000000..1b3f7d5f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/packuw.h
@@ -0,0 +1,5 @@
+require_rv64;
+require_extension(EXT_XZBP);
+reg_t lo = zext32(RS1) >> 16;
+reg_t hi = zext32(RS2) >> 16 << 16;
+WRITE_RD(sext32(lo | hi));
diff --git a/vendor/riscv-isa-sim/riscv/insns/packw.h b/vendor/riscv-isa-sim/riscv/insns/packw.h
new file mode 100644
index 00000000..084c190d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/packw.h
@@ -0,0 +1,10 @@
+// RV64Zbb contains zext.h but not general packw
+require(((insn.rs2() == 0) && p->extension_enabled(EXT_ZBB))
+        || p->extension_enabled(EXT_ZBKB)
+        || p->extension_enabled(EXT_XZBP)
+        || p->extension_enabled(EXT_XZBE)
+        || p->extension_enabled(EXT_XZBF));
+require_rv64;
+reg_t lo = zext32(RS1 << 16) >> 16;
+reg_t hi = zext32(RS2 << 16);
+WRITE_RD(sext32(lo | hi));
diff --git a/vendor/riscv-isa-sim/riscv/insns/pbsad.h b/vendor/riscv-isa-sim/riscv/insns/pbsad.h
new file mode 100644
index 00000000..32789ef8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pbsad.h
@@ -0,0 +1,3 @@
+P_REDUCTION_ULOOP(64, 8, false, false, {
+  pd_res += (ps1 > ps2 ? ps1 - ps2 : ps2 - ps1);
+})
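
pack above concatenates the low halves of the two sources, with rs1 landing
in the low half of rd; packh does the same with the low bytes. For xlen == 32
the data path reduces to:

#include <stdint.h>

static uint32_t pack32(uint32_t rs1, uint32_t rs2)
{
  return (rs1 & 0xFFFFu) | (rs2 << 16);   /* rs1 low half, rs2 low half above it */
}
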
diff --git a/vendor/riscv-isa-sim/riscv/insns/pbsada.h b/vendor/riscv-isa-sim/riscv/insns/pbsada.h
new file mode 100644
index 00000000..cab988ed
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pbsada.h
@@ -0,0 +1,3 @@
+P_REDUCTION_ULOOP(64, 8, true, false, {
+  pd_res += (ps1 > ps2 ? ps1 - ps2 : ps2 - ps1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/pkbb16.h b/vendor/riscv-isa-sim/riscv/insns/pkbb16.h
new file mode 100644
index 00000000..20dcde61
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pkbb16.h
@@ -0,0 +1,2 @@
+require_rv64;
+P_PK(16, 0, 0);
diff --git a/vendor/riscv-isa-sim/riscv/insns/pkbt16.h b/vendor/riscv-isa-sim/riscv/insns/pkbt16.h
new file mode 100644
index 00000000..8c51ab7d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pkbt16.h
@@ -0,0 +1 @@
+P_PK(16, 0, 1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/pkbt32.h b/vendor/riscv-isa-sim/riscv/insns/pkbt32.h
new file mode 100644
index 00000000..2783d980
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pkbt32.h
@@ -0,0 +1,2 @@
+require_rv64;
+P_PK(32, 0, 1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/pktb16.h b/vendor/riscv-isa-sim/riscv/insns/pktb16.h
new file mode 100644
index 00000000..c49c1ed3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pktb16.h
@@ -0,0 +1 @@
+P_PK(16, 1, 0);
diff --git a/vendor/riscv-isa-sim/riscv/insns/pktb32.h b/vendor/riscv-isa-sim/riscv/insns/pktb32.h
new file mode 100644
index 00000000..0a7e17f4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pktb32.h
@@ -0,0 +1,2 @@
+require_rv64;
+P_PK(32, 1, 0);
diff --git a/vendor/riscv-isa-sim/riscv/insns/pktt16.h b/vendor/riscv-isa-sim/riscv/insns/pktt16.h
new file mode 100644
index 00000000..b263ed40
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/pktt16.h
@@ -0,0 +1,2 @@
+require_rv64;
+P_PK(16, 1, 1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/radd16.h b/vendor/riscv-isa-sim/riscv/insns/radd16.h
new file mode 100644
index 00000000..8f800502
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/radd16.h
@@ -0,0 +1,3 @@
+P_LOOP(16, {
+  pd = (ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/radd32.h b/vendor/riscv-isa-sim/riscv/insns/radd32.h
new file mode 100644
index 00000000..df50dd17
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/radd32.h
@@ -0,0 +1,4 @@
+require_rv64;
+P_LOOP(32, {
+  pd = ((int64_t)ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/radd64.h b/vendor/riscv-isa-sim/riscv/insns/radd64.h
new file mode 100644
index 00000000..110c472c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/radd64.h
@@ -0,0 +1,8 @@
+P_64_PROFILE({
+  rd = (rs1 + rs2) >> 1;
+  if (rs1 > 0 && rs2 > 0) {
+    rd &= ~((reg_t)1 << 63);
+  } else if (rs1 < 0 && rs2 < 0) {
+    rd |= ((reg_t)1 << 63);
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/radd8.h b/vendor/riscv-isa-sim/riscv/insns/radd8.h
new file mode 100644
index 00000000..ad0b6ece
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/radd8.h
@@ -0,0 +1,3 @@
+P_LOOP(8, {
+  pd = (ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/raddw.h b/vendor/riscv-isa-sim/riscv/insns/raddw.h
new file mode 100644
index 00000000..ec04bb6d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/raddw.h
@@ -0,0 +1,4 @@
+require_extension(EXT_ZPN);
+sreg_t res = (sreg_t)P_SW(RS1, 0) + (sreg_t)P_SW(RS2, 0);
+res >>= 1;
+WRITE_RD(sext_xlen(res));
diff --git a/vendor/riscv-isa-sim/riscv/insns/rcras16.h b/vendor/riscv-isa-sim/riscv/insns/rcras16.h
new file mode 100644
index 00000000..529c27fe
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rcras16.h
@@ -0,0 +1,5 @@
+P_CROSS_LOOP(16, {
+  pd = (ps1 + ps2) >> 1;
+}, {
+  pd = (ps1 - ps2) >> 1;
+})
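
The radd*/rcras* "halving" forms above compute (a + b) >> 1 in a wider
intermediate; radd64 has no wider type available, so it adds with wrap-around
and then patches the sign bit from the operand signs. An equivalent
overflow-free formulation of the signed floor average:

#include <stdint.h>

static int64_t avg_floor64(int64_t a, int64_t b)
{
  /* floor(a/2) + floor(b/2), plus 1 when both operands had their low bit set */
  return (a >> 1) + (b >> 1) + (a & b & 1);
}
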
diff --git a/vendor/riscv-isa-sim/riscv/insns/rcras32.h b/vendor/riscv-isa-sim/riscv/insns/rcras32.h
new file mode 100644
index 00000000..86a3f65e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rcras32.h
@@ -0,0 +1,6 @@
+require_rv64;
+P_CROSS_LOOP(32, {
+  pd = ((int64_t)ps1 + ps2) >> 1;
+}, {
+  pd = ((int64_t)ps1 - ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/rcrsa16.h
new file mode 100644
index 00000000..156e32c4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rcrsa16.h
@@ -0,0 +1,5 @@
+P_CROSS_LOOP(16, {
+  pd = (ps1 - ps2) >> 1;
+}, {
+  pd = (ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/rcrsa32.h
new file mode 100644
index 00000000..b45f31fe
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rcrsa32.h
@@ -0,0 +1,6 @@
+require_rv64;
+P_CROSS_LOOP(32, {
+  pd = ((uint64_t)ps1 - ps2) >> 1;
+}, {
+  pd = ((uint64_t)ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rem.h b/vendor/riscv-isa-sim/riscv/insns/rem.h
new file mode 100644
index 00000000..85879957
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rem.h
@@ -0,0 +1,9 @@
+require_extension('M');
+sreg_t lhs = sext_xlen(RS1);
+sreg_t rhs = sext_xlen(RS2);
+if(rhs == 0)
+  WRITE_RD(lhs);
+else if(lhs == INT64_MIN && rhs == -1)
+  WRITE_RD(0);
+else
+  WRITE_RD(sext_xlen(lhs % rhs));
diff --git a/vendor/riscv-isa-sim/riscv/insns/remu.h b/vendor/riscv-isa-sim/riscv/insns/remu.h
new file mode 100644
index 00000000..e74774cc
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/remu.h
@@ -0,0 +1,7 @@
+require_extension('M');
+reg_t lhs = zext_xlen(RS1);
+reg_t rhs = zext_xlen(RS2);
+if(rhs == 0)
+  WRITE_RD(sext_xlen(RS1));
+else
+  WRITE_RD(sext_xlen(lhs % rhs));
diff --git a/vendor/riscv-isa-sim/riscv/insns/remuw.h b/vendor/riscv-isa-sim/riscv/insns/remuw.h
new file mode 100644
index 00000000..b239c8f3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/remuw.h
@@ -0,0 +1,8 @@
+require_extension('M');
+require_rv64;
+reg_t lhs = zext32(RS1);
+reg_t rhs = zext32(RS2);
+if(rhs == 0)
+  WRITE_RD(sext32(lhs));
+else
+  WRITE_RD(sext32(lhs % rhs));
diff --git a/vendor/riscv-isa-sim/riscv/insns/remw.h b/vendor/riscv-isa-sim/riscv/insns/remw.h
new file mode 100644
index 00000000..56221ccd
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/remw.h
@@ -0,0 +1,8 @@
+require_extension('M');
+require_rv64;
+sreg_t lhs = sext32(RS1);
+sreg_t rhs = sext32(RS2);
+if(rhs == 0)
+  WRITE_RD(lhs);
+else
+  WRITE_RD(sext32(lhs % rhs));
diff --git a/vendor/riscv-isa-sim/riscv/insns/rol.h b/vendor/riscv-isa-sim/riscv/insns/rol.h
new file mode 100644
index 00000000..07735a1b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rol.h
@@ -0,0 +1,4 @@
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+int shamt = RS2 & (xlen-1);
+int rshamt = -shamt & (xlen-1);
+WRITE_RD(sext_xlen((RS1 << shamt) | (zext_xlen(RS1) >> rshamt)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/rolw.h b/vendor/riscv-isa-sim/riscv/insns/rolw.h
new file mode 100644
index 00000000..4d5eeb19
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rolw.h
@@ -0,0 +1,5 @@
+require_rv64;
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+int shamt = RS2 & 31;
+int rshamt = -shamt & 31;
+WRITE_RD(sext32((RS1 << shamt) | (zext32(RS1) >> rshamt)));
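
rem/remu/remw above encode the RISC-V M-extension corner cases: a zero
divisor returns the dividend unchanged, and the signed overflow case (most
negative value divided by -1) returns a zero remainder. In portable C, where
both cases would otherwise be undefined (rem32 is an illustrative name):

#include <stdint.h>

static int32_t rem32(int32_t lhs, int32_t rhs)
{
  if (rhs == 0)
    return lhs;
  if (lhs == INT32_MIN && rhs == -1)
    return 0;
  return lhs % rhs;
}
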
diff --git a/vendor/riscv-isa-sim/riscv/insns/ror.h b/vendor/riscv-isa-sim/riscv/insns/ror.h
new file mode 100644
index 00000000..61b5ff8f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/ror.h
@@ -0,0 +1,4 @@
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+int shamt = RS2 & (xlen-1);
+int rshamt = -shamt & (xlen-1);
+WRITE_RD(sext_xlen((RS1 << rshamt) | (zext_xlen(RS1) >> shamt)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/rori.h b/vendor/riscv-isa-sim/riscv/insns/rori.h
new file mode 100644
index 00000000..6585b60f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rori.h
@@ -0,0 +1,5 @@
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+require(SHAMT < xlen);
+int shamt = SHAMT & (xlen-1);
+int rshamt = -shamt & (xlen-1);
+WRITE_RD(sext_xlen((RS1 << rshamt) | (zext_xlen(RS1) >> shamt)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/roriw.h b/vendor/riscv-isa-sim/riscv/insns/roriw.h
new file mode 100644
index 00000000..331d2264
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/roriw.h
@@ -0,0 +1,6 @@
+require_rv64;
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+require(SHAMT < 32);
+int shamt = SHAMT & 31;
+int rshamt = -shamt & 31;
+WRITE_RD(sext32((RS1 << rshamt) | (zext32(RS1) >> shamt)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/rorw.h b/vendor/riscv-isa-sim/riscv/insns/rorw.h
new file mode 100644
index 00000000..65f0078c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rorw.h
@@ -0,0 +1,5 @@
+require_rv64;
+require_either_extension(EXT_ZBB, EXT_ZBKB);
+int shamt = RS2 & 31;
+int rshamt = -shamt & 31;
+WRITE_RD(sext32((RS1 << rshamt) | (zext32(RS1) >> shamt)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/rstas16.h b/vendor/riscv-isa-sim/riscv/insns/rstas16.h
new file mode 100644
index 00000000..298b5917
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rstas16.h
@@ -0,0 +1,5 @@
+P_STRAIGHT_LOOP(16, {
+  pd = (ps1 + ps2) >> 1;
+}, {
+  pd = (ps1 - ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rstas32.h b/vendor/riscv-isa-sim/riscv/insns/rstas32.h
new file mode 100644
index 00000000..9c8995a8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rstas32.h
@@ -0,0 +1,6 @@
+require_rv64;
+P_STRAIGHT_LOOP(32, {
+  pd = ((int64_t)ps1 + ps2) >> 1;
+}, {
+  pd = ((int64_t)ps1 - ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rstsa16.h b/vendor/riscv-isa-sim/riscv/insns/rstsa16.h
new file mode 100644
index 00000000..443e4cef
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rstsa16.h
@@ -0,0 +1,5 @@
+P_STRAIGHT_LOOP(16, {
+  pd = (ps1 - ps2) >> 1;
+}, {
+  pd = (ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rstsa32.h b/vendor/riscv-isa-sim/riscv/insns/rstsa32.h
new file mode 100644
index 00000000..a89fc6ea
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rstsa32.h
@@ -0,0 +1,6 @@
+require_rv64;
+P_STRAIGHT_LOOP(32, {
+  pd = ((int64_t)ps1 - ps2) >> 1;
+}, {
+  pd = ((int64_t)ps1 + ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub16.h b/vendor/riscv-isa-sim/riscv/insns/rsub16.h
new file mode 100644
index 00000000..768e0677
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rsub16.h
@@ -0,0 +1,3 @@
+P_LOOP(16, {
+  pd = (ps1 - ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub32.h b/vendor/riscv-isa-sim/riscv/insns/rsub32.h
new file mode 100644
index 00000000..22c31199
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rsub32.h
@@ -0,0 +1,4 @@
+require_rv64;
+P_LOOP(32, {
+  pd = ((int64_t)ps1 - ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub64.h b/vendor/riscv-isa-sim/riscv/insns/rsub64.h
new file mode 100644
index 00000000..397c973d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rsub64.h
@@ -0,0 +1,8 @@
+P_64_PROFILE({
+  rd = (rs1 - rs2) >> 1;
+  if (rs1 > 0 && rs2 < 0) {
+    rd &= ~((reg_t)1 << 63);
+  } else if(rs1 < 0 && rs2 > 0) {
+    rd |= ((reg_t)1 << 63);
+  }
+})
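
The rotate handlers above use the "-shamt & (xlen-1)" idiom so that a rotate
by zero never produces an undefined shift by xlen. The same pattern for a
32-bit rotate right:

#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned shamt)
{
  shamt &= 31;
  return (x >> shamt) | (x << (-shamt & 31));   /* well-defined even for shamt == 0 */
}
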
diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub8.h b/vendor/riscv-isa-sim/riscv/insns/rsub8.h
new file mode 100644
index 00000000..9cf9c1a8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rsub8.h
@@ -0,0 +1,3 @@
+P_LOOP(8, {
+  pd = (ps1 - ps2) >> 1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/rsubw.h b/vendor/riscv-isa-sim/riscv/insns/rsubw.h
new file mode 100644
index 00000000..01dec51a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/rsubw.h
@@ -0,0 +1,4 @@
+require_extension(EXT_ZPN);
+sreg_t res = (sreg_t)P_SW(RS1, 0) - (sreg_t)P_SW(RS2, 0);
+res >>= 1;
+WRITE_RD(sext_xlen(res));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sb.h b/vendor/riscv-isa-sim/riscv/insns/sb.h
new file mode 100644
index 00000000..8729c2d4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sb.h
@@ -0,0 +1 @@
+MMU.store_uint8(RS1 + insn.s_imm(), RS2);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sc_d.h b/vendor/riscv-isa-sim/riscv/insns/sc_d.h
new file mode 100644
index 00000000..54023ed4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sc_d.h
@@ -0,0 +1,11 @@
+require_extension('A');
+require_rv64;
+
+bool have_reservation = MMU.check_load_reservation(RS1, 8);
+
+if (have_reservation)
+  MMU.store_uint64(RS1, RS2);
+
+MMU.yield_load_reservation();
+
+WRITE_RD(!have_reservation);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sc_w.h b/vendor/riscv-isa-sim/riscv/insns/sc_w.h
new file mode 100644
index 00000000..e430dcb2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sc_w.h
@@ -0,0 +1,10 @@
+require_extension('A');
+
+bool have_reservation = MMU.check_load_reservation(RS1, 4);
+
+if (have_reservation)
+  MMU.store_uint32(RS1, RS2);
+
+MMU.yield_load_reservation();
+
+WRITE_RD(!have_reservation);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sclip16.h b/vendor/riscv-isa-sim/riscv/insns/sclip16.h
new file mode 100644
index 00000000..d90ce19c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sclip16.h
@@ -0,0 +1,14 @@
+require_vector_vs;
+P_I_LOOP(16, 4, {
+  int64_t int_max = INT64_MAX >> (64 - (imm4u + 1));
+  int64_t int_min = INT64_MIN >> (64 - (imm4u + 1));
+  pd = ps1;
+
+  if (ps1 > int_max) {
+    pd = int_max;
+    P_SET_OV(1);
+  } else if (ps1 < int_min) {
+    pd = int_min;
+    P_SET_OV(1);
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/sclip32.h b/vendor/riscv-isa-sim/riscv/insns/sclip32.h
new file mode 100644
index 00000000..ff1ba287
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sclip32.h
@@ -0,0 +1,14 @@
+require_vector_vs;
+P_I_LOOP(32, 5, {
+  int64_t int_max = INT64_MAX >> (64 - (imm5u + 1));
+  int64_t int_min = INT64_MIN >> (64 - (imm5u + 1));
+  pd = ps1;
+
+  if (ps1 > int_max) {
+    pd = int_max;
+    P_SET_OV(1);
+  } else if (ps1 < int_min) {
+    pd = int_min;
+    P_SET_OV(1);
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/sclip8.h b/vendor/riscv-isa-sim/riscv/insns/sclip8.h
new file mode 100644
index 00000000..afd9c692
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sclip8.h
@@ -0,0 +1,14 @@
+require_vector_vs;
+P_I_LOOP(8, 3, {
+  int64_t int_max = INT64_MAX >> (64 - (imm3u + 1));
+  int64_t int_min = INT64_MIN >> (64 - (imm3u + 1));
+  pd = ps1;
+
+  if (ps1 > int_max) {
+    pd = int_max;
+    P_SET_OV(1);
+  } else if (ps1 < int_min) {
+    pd = int_min;
+    P_SET_OV(1);
+  }
+})
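
The sclip* handlers above derive both clip bounds from the immediate:
INT64_MAX >> (64 - (imm + 1)) is 2^imm - 1 and INT64_MIN >> (64 - (imm + 1))
is -2^imm, so each lane is clamped to [-2^imm, 2^imm - 1]. One lane written
out (sclip_lane is an illustrative name):

#include <stdint.h>

static int64_t sclip_lane(int64_t v, unsigned imm, int *ov)   /* imm <= 62 */
{
  const int64_t max = INT64_MAX >> (64 - (imm + 1));
  const int64_t min = INT64_MIN >> (64 - (imm + 1));
  if (v > max) { *ov = 1; return max; }
  if (v < min) { *ov = 1; return min; }
  return v;
}
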
diff --git a/vendor/riscv-isa-sim/riscv/insns/scmple16.h b/vendor/riscv-isa-sim/riscv/insns/scmple16.h
new file mode 100644
index 00000000..060c04c2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/scmple16.h
@@ -0,0 +1,3 @@
+P_LOOP(16, {
+  pd = (ps1 <= ps2) ? -1 : 0;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/scmple8.h b/vendor/riscv-isa-sim/riscv/insns/scmple8.h
new file mode 100644
index 00000000..8920c1f5
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/scmple8.h
@@ -0,0 +1,3 @@
+P_LOOP(8, {
+  pd = (ps1 <= ps2) ? -1 : 0;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/scmplt16.h b/vendor/riscv-isa-sim/riscv/insns/scmplt16.h
new file mode 100644
index 00000000..db62f6f7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/scmplt16.h
@@ -0,0 +1,3 @@
+P_LOOP(16, {
+  pd = (ps1 < ps2) ? -1 : 0;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/scmplt8.h b/vendor/riscv-isa-sim/riscv/insns/scmplt8.h
new file mode 100644
index 00000000..7d072097
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/scmplt8.h
@@ -0,0 +1,3 @@
+P_LOOP(8, {
+  pd = (ps1 < ps2) ? -1 : 0;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/sd.h b/vendor/riscv-isa-sim/riscv/insns/sd.h
new file mode 100644
index 00000000..664deb2c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sd.h
@@ -0,0 +1,2 @@
+require_rv64;
+MMU.store_uint64(RS1 + insn.s_imm(), RS2);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sext_b.h b/vendor/riscv-isa-sim/riscv/insns/sext_b.h
new file mode 100644
index 00000000..5acde617
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sext_b.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBB);
+WRITE_RD((sreg_t)(int8_t)(RS1));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sext_h.h b/vendor/riscv-isa-sim/riscv/insns/sext_h.h
new file mode 100644
index 00000000..e89a68d3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sext_h.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBB);
+WRITE_RD((sreg_t)(int16_t)(RS1));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h b/vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h
new file mode 100644
index 00000000..f566d632
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h
@@ -0,0 +1,3 @@
+require_extension('S');
+require_extension(EXT_SVINVAL);
+require_impl(IMPL_MMU);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sfence_vma.h b/vendor/riscv-isa-sim/riscv/insns/sfence_vma.h
new file mode 100644
index 00000000..7d6c01a8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sfence_vma.h
@@ -0,0 +1,9 @@
+require_extension('S');
+require_impl(IMPL_MMU);
+if (STATE.v) {
+  if (STATE.prv == PRV_U || get_field(STATE.hstatus->read(), HSTATUS_VTVM))
+    require_novirt();
+} else {
+  require_privilege(get_field(STATE.mstatus->read(), MSTATUS_TVM) ? PRV_M : PRV_S);
+}
+MMU.flush_tlb();
diff --git a/vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h b/vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h
new file mode 100644
index 00000000..f566d632
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h
@@ -0,0 +1,3 @@
+require_extension('S');
+require_extension(EXT_SVINVAL);
+require_impl(IMPL_MMU);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh.h b/vendor/riscv-isa-sim/riscv/insns/sh.h
new file mode 100644
index 00000000..22aa3a88
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh.h
@@ -0,0 +1 @@
+MMU.store_uint16(RS1 + insn.s_imm(), RS2);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh1add.h b/vendor/riscv-isa-sim/riscv/insns/sh1add.h
new file mode 100644
index 00000000..6cbc3605
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh1add.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen((RS1 << 1) + RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h b/vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h
new file mode 100644
index 00000000..11770026
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h
@@ -0,0 +1,3 @@
+require_rv64;
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen((zext32(RS1) << 1) + RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh2add.h b/vendor/riscv-isa-sim/riscv/insns/sh2add.h
new file mode 100644
index 00000000..ea55e79a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh2add.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen((RS1 << 2) + RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h b/vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h
new file mode 100644
index 00000000..b51250d9
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h
@@ -0,0 +1,3 @@
+require_rv64;
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen((zext32(RS1) << 2) + RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh3add.h b/vendor/riscv-isa-sim/riscv/insns/sh3add.h
new file mode 100644
index 00000000..de71f0fa
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh3add.h
@@ -0,0 +1,2 @@
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen((RS1 << 3) + RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h b/vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h
new file mode 100644
index 00000000..b618b851
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h
@@ -0,0 +1,3 @@
+require_rv64;
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen((zext32(RS1) << 3) + RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sig0.h b/vendor/riscv-isa-sim/riscv/insns/sha256sig0.h
new file mode 100644
index 00000000..f86e42f7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha256sig0.h
@@ -0,0 +1,13 @@
+
+require_extension(EXT_ZKNH);
+
+#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1))))
+
+uint32_t a = RS1;
+
+WRITE_RD(
+  sext32(ROR32(a, 7) ^ ROR32(a,18) ^ (a >> 3))
+);
+
+#undef ROR32
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sig1.h b/vendor/riscv-isa-sim/riscv/insns/sha256sig1.h
new file mode 100644
index 00000000..72e586cb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha256sig1.h
@@ -0,0 +1,13 @@
+
+require_extension(EXT_ZKNH);
+
+#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1))))
+
+uint32_t a = RS1;
+
+WRITE_RD(
+  sext32(ROR32(a, 17) ^ ROR32(a,19) ^ (a >> 10))
+);
+
+#undef ROR32
+
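
The Zba shNadd/shNadd.uw handlers above compute rd = (rs1 << N) + rs2, the
.uw forms first zero-extending the low 32 bits of rs1 -- the usual scaled
array-index primitives. For example, sh2add.uw on RV64 reduces to:

#include <stdint.h>

static uint64_t sh2add_uw(uint64_t rs1, uint64_t rs2)
{
  return ((rs1 & 0xFFFFFFFFull) << 2) + rs2;   /* scale a 32-bit index by 4 */
}
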
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sum0.h b/vendor/riscv-isa-sim/riscv/insns/sha256sum0.h
new file mode 100644
index 00000000..f0aed47f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha256sum0.h
@@ -0,0 +1,13 @@
+
+require_extension(EXT_ZKNH);
+
+#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1))))
+
+uint32_t a = RS1;
+
+WRITE_RD(
+  sext32(ROR32(a, 2) ^ ROR32(a,13) ^ ROR32(a, 22))
+);
+
+#undef ROR32
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sum1.h b/vendor/riscv-isa-sim/riscv/insns/sha256sum1.h
new file mode 100644
index 00000000..41de5afe
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha256sum1.h
@@ -0,0 +1,13 @@
+
+require_extension(EXT_ZKNH);
+
+#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1))))
+
+uint32_t a = RS1;
+
+WRITE_RD(
+  sext32(ROR32(a, 6) ^ ROR32(a,11) ^ ROR32(a, 25))
+);
+
+#undef ROR32
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig0.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig0.h
new file mode 100644
index 00000000..2efd763d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig0.h
@@ -0,0 +1,13 @@
+require_rv64;
+require_extension(EXT_ZKNH);
+
+#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1))))
+
+uint64_t a = RS1;
+
+WRITE_RD(
+  ROR64(a, 1) ^ ROR64(a, 8) ^ (a >> 7)
+);
+
+#undef ROR64
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h
new file mode 100644
index 00000000..eb6a2a25
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h
@@ -0,0 +1,9 @@
+
+require_rv32;
+require_extension(EXT_ZKNH);
+
+reg_t result =
+  (zext32(RS1) >> 1) ^ (zext32(RS1) >> 7) ^ (zext32(RS1) >> 8) ^
+  (zext32(RS2) << 31) ^ (zext32(RS2) << 24);
+
+WRITE_RD(sext_xlen(result));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h
new file mode 100644
index 00000000..599a6a10
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h
@@ -0,0 +1,9 @@
+
+require_rv32;
+require_extension(EXT_ZKNH);
+
+reg_t result =
+  (zext32(RS1) >> 1) ^ (zext32(RS1) >> 7) ^ (zext32(RS1) >> 8) ^
+  (zext32(RS2) << 31) ^ (zext32(RS2) << 25) ^ (zext32(RS2) << 24);
+
+WRITE_RD(sext_xlen(result));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig1.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig1.h
new file mode 100644
index 00000000..21766541
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig1.h
@@ -0,0 +1,13 @@
+require_rv64;
+require_extension(EXT_ZKNH);
+
+#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1))))
+
+uint64_t a = RS1;
+
+WRITE_RD(
+  ROR64(a, 19) ^ ROR64(a,61) ^ (a >> 6)
+);
+
+#undef ROR64
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h
new file mode 100644
index 00000000..271a1f90
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h
@@ -0,0 +1,9 @@
+
+require_rv32;
+require_extension(EXT_ZKNH);
+
+reg_t result =
+  (zext32(RS1) << 3) ^ (zext32(RS1) >> 6) ^ (zext32(RS1) >> 19) ^
+  (zext32(RS2) >> 29) ^ (zext32(RS2) << 13);
+
+WRITE_RD(sext_xlen(result));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h
new file mode 100644
index 00000000..491810d3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h
@@ -0,0 +1,9 @@
+
+require_rv32;
+require_extension(EXT_ZKNH);
+
+reg_t result =
+  (zext32(RS1) << 3) ^ (zext32(RS1) >> 6) ^ (zext32(RS1) >> 19) ^
+  (zext32(RS2) >> 29) ^ (zext32(RS2) << 26) ^ (zext32(RS2) << 13);
+
+WRITE_RD(sext_xlen(result));
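
The scalar SHA-2 instructions above are direct transcriptions of the
FIPS 180-4 functions; sha256sig0, for instance, is sigma0(x) =
ROTR(x,7) ^ ROTR(x,18) ^ (x >> 3). The same function without the macro:

#include <stdint.h>

static uint32_t rotr32(uint32_t x, unsigned n)
{
  return (x >> n) | (x << (-n & 31));
}

static uint32_t sha256_sigma0(uint32_t x)
{
  return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3);
}
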
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum0.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum0.h
new file mode 100644
index 00000000..01182e67
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum0.h
@@ -0,0 +1,13 @@
+require_rv64;
+require_extension(EXT_ZKNH);
+
+#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1))))
+
+uint64_t a = RS1;
+
+WRITE_RD(
+  ROR64(a, 28) ^ ROR64(a,34) ^ ROR64(a,39)
+);
+
+#undef ROR64
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h
new file mode 100644
index 00000000..cb6c636f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h
@@ -0,0 +1,9 @@
+
+require_rv32;
+require_extension(EXT_ZKNH);
+
+reg_t result =
+  (zext32(RS1) << 25) ^ (zext32(RS1) << 30) ^ (zext32(RS1) >> 28) ^
+  (zext32(RS2) >> 7) ^ (zext32(RS2) >> 2) ^ (zext32(RS2) << 4);
+
+WRITE_RD(sext_xlen(result));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum1.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum1.h
new file mode 100644
index 00000000..267d7dd9
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum1.h
@@ -0,0 +1,13 @@
+require_rv64;
+require_extension(EXT_ZKNH);
+
+#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1))))
+
+uint64_t a = RS1;
+
+WRITE_RD(
+  ROR64(a, 14) ^ ROR64(a, 18) ^ ROR64(a, 41)
+);
+
+#undef ROR64
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h
new file mode 100644
index 00000000..8109d0dc
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h
@@ -0,0 +1,9 @@
+
+require_rv32;
+require_extension(EXT_ZKNH);
+
+reg_t result =
+  (zext32(RS1) << 23) ^ (zext32(RS1) >> 14) ^ (zext32(RS1) >> 18) ^
+  (zext32(RS2) >> 9) ^ (zext32(RS2) << 18) ^ (zext32(RS2) << 14);
+
+WRITE_RD(sext_xlen(result));
diff --git a/vendor/riscv-isa-sim/riscv/insns/shfl.h b/vendor/riscv-isa-sim/riscv/insns/shfl.h
new file mode 100644
index 00000000..3004871e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/shfl.h
@@ -0,0 +1,9 @@
+require_extension(EXT_XZBP);
+reg_t x = RS1;
+int shamt = RS2 & ((xlen-1) >> 1);
+if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16);
+if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8);
+if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4);
+if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2);
+if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1);
+WRITE_RD(sext_xlen(x));
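
Each conditional in shfl/shfli above is one butterfly stage: the bits under
the "keep" mask stay put while the two remaining groups swap places by being
shifted toward each other. The 16-bit stage in isolation:

#include <stdint.h>

static uint64_t shfl_stage16(uint64_t x)
{
  return (x & 0xFFFF00000000FFFFull)          /* bits that do not move */
       | ((x & 0x0000FFFF00000000ull) >> 16)  /* upper-middle group moves down */
       | ((x & 0x00000000FFFF0000ull) << 16); /* lower-middle group moves up */
}
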
diff --git a/vendor/riscv-isa-sim/riscv/insns/shfli.h b/vendor/riscv-isa-sim/riscv/insns/shfli.h
new file mode 100644
index 00000000..f8636190
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/shfli.h
@@ -0,0 +1,12 @@
+// Zbkb contains zip but not general shfli
+require(((insn.rs2() == (xlen / 2 - 1)) && p->extension_enabled(EXT_ZBKB))
+        || p->extension_enabled(EXT_XZBP));
+require(SHAMT < (xlen/2));
+reg_t x = RS1;
+int shamt = SHAMT & ((xlen-1) >> 1);
+if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16);
+if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8);
+if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4);
+if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2);
+if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1);
+WRITE_RD(sext_xlen(x));
diff --git a/vendor/riscv-isa-sim/riscv/insns/shflw.h b/vendor/riscv-isa-sim/riscv/insns/shflw.h
new file mode 100644
index 00000000..06ee3604
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/shflw.h
@@ -0,0 +1,9 @@
+require_rv64;
+require_extension(EXT_XZBP);
+reg_t x = RS1;
+int shamt = RS2 & 15;
+if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8);
+if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4);
+if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2);
+if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1);
+WRITE_RD(sext32(x));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sinval_vma.h b/vendor/riscv-isa-sim/riscv/insns/sinval_vma.h
new file mode 100644
index 00000000..5e431497
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sinval_vma.h
@@ -0,0 +1,2 @@
+require_extension(EXT_SVINVAL);
+#include "sfence_vma.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/sll.h b/vendor/riscv-isa-sim/riscv/insns/sll.h
new file mode 100644
index 00000000..7db76131
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sll.h
@@ -0,0 +1 @@
+WRITE_RD(sext_xlen(RS1 << (RS2 & (xlen-1))));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sll16.h b/vendor/riscv-isa-sim/riscv/insns/sll16.h
new file mode 100644
index 00000000..9659f53e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sll16.h
@@ -0,0 +1,3 @@
+P_X_ULOOP(16, 4, {
+  pd = ps1 << sa;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/sll32.h b/vendor/riscv-isa-sim/riscv/insns/sll32.h
new file mode 100644
index 00000000..8a05b39b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sll32.h
@@ -0,0 +1,4 @@
+require_rv64;
+P_X_ULOOP(32, 5, {
+  pd = ps1 << sa;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/sll8.h b/vendor/riscv-isa-sim/riscv/insns/sll8.h
new file mode 100644
index 00000000..b7f069a3
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sll8.h
@@ -0,0 +1,3 @@
+P_X_ULOOP(8, 3, {
+  pd = ps1 << sa;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/slli.h b/vendor/riscv-isa-sim/riscv/insns/slli.h
new file mode 100644
index 00000000..26782fda
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slli.h
@@ -0,0 +1,2 @@
+require(SHAMT < xlen);
+WRITE_RD(sext_xlen(RS1 << SHAMT));
diff --git a/vendor/riscv-isa-sim/riscv/insns/slli16.h b/vendor/riscv-isa-sim/riscv/insns/slli16.h
new file mode 100644
index 00000000..8d89a61f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slli16.h
@@ -0,0 +1,3 @@
+P_I_ULOOP(16, 4, {
+  pd = ps1 << imm4u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/slli32.h b/vendor/riscv-isa-sim/riscv/insns/slli32.h
new file mode 100644
index 00000000..71d880af
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slli32.h
@@ -0,0 +1,4 @@
+require_rv64;
+P_I_ULOOP(32, 5, {
+  pd = ps1 << imm5u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/slli8.h b/vendor/riscv-isa-sim/riscv/insns/slli8.h
new file mode 100644
index 00000000..c997496f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slli8.h
@@ -0,0 +1,3 @@
+P_I_ULOOP(8, 3, {
+  pd = ps1 << imm3u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/slli_uw.h b/vendor/riscv-isa-sim/riscv/insns/slli_uw.h
new file mode 100644
index 00000000..9cd48a91
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slli_uw.h
@@ -0,0 +1,3 @@
+require_rv64;
+require_extension(EXT_ZBA);
+WRITE_RD(sext_xlen(zext32(RS1) << SHAMT));
diff --git a/vendor/riscv-isa-sim/riscv/insns/slliw.h b/vendor/riscv-isa-sim/riscv/insns/slliw.h
new file mode 100644
index 00000000..c1fda656
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slliw.h
@@ -0,0 +1,2 @@
+require_rv64;
+WRITE_RD(sext32(RS1 << SHAMT));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sllw.h b/vendor/riscv-isa-sim/riscv/insns/sllw.h
new file mode 100644
index 00000000..affe8944
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sllw.h
@@ -0,0 +1,2 @@
+require_rv64;
+WRITE_RD(sext32(RS1 << (RS2 & 0x1F)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/slo.h b/vendor/riscv-isa-sim/riscv/insns/slo.h
new file mode 100644
index 00000000..a27ec37e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slo.h
@@ -0,0 +1,2 @@
+require_extension(EXT_XZBP);
+WRITE_RD(sext_xlen(~((~RS1) << (RS2 & (xlen-1)))));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sloi.h b/vendor/riscv-isa-sim/riscv/insns/sloi.h
new file mode 100644
index 00000000..62278b03
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sloi.h
@@ -0,0 +1,3 @@
+require(SHAMT < xlen);
+require_extension(EXT_XZBP);
+WRITE_RD(sext_xlen(~((~RS1) << SHAMT)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sloiw.h b/vendor/riscv-isa-sim/riscv/insns/sloiw.h
new file mode 100644
index 00000000..492c94a1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sloiw.h
@@ -0,0 +1,3 @@
+require_rv64;
+require_extension(EXT_XZBP);
+WRITE_RD(sext32(~((~RS1) << SHAMT)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/slow.h b/vendor/riscv-isa-sim/riscv/insns/slow.h
new file mode 100644
index 00000000..04c90a45
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slow.h
@@ -0,0 +1,3 @@
+require_rv64;
+require_extension(EXT_XZBP);
+WRITE_RD(sext32(~((~RS1) << (RS2 & 0x1F))));
diff --git a/vendor/riscv-isa-sim/riscv/insns/slt.h b/vendor/riscv-isa-sim/riscv/insns/slt.h
new file mode 100644
index 00000000..25ccd45e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slt.h
@@ -0,0 +1 @@
+WRITE_RD(sreg_t(RS1) < sreg_t(RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/slti.h b/vendor/riscv-isa-sim/riscv/insns/slti.h
new file mode 100644
index 00000000..3671d241
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/slti.h
@@ -0,0 +1 @@
+WRITE_RD(sreg_t(RS1) < sreg_t(insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sltiu.h b/vendor/riscv-isa-sim/riscv/insns/sltiu.h
new file mode 100644
index 00000000..f3984571
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sltiu.h
@@ -0,0 +1 @@
+WRITE_RD(RS1 < reg_t(insn.i_imm()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/sltu.h b/vendor/riscv-isa-sim/riscv/insns/sltu.h
new file mode 100644
index 00000000..84d97a2a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sltu.h
@@ -0,0 +1 @@
+WRITE_RD(RS1 < RS2);
diff --git a/vendor/riscv-isa-sim/riscv/insns/sm3p0.h b/vendor/riscv-isa-sim/riscv/insns/sm3p0.h
new file mode 100644
index 00000000..0a72a930
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sm3p0.h
@@ -0,0 +1,14 @@
+
+require_extension(EXT_ZKSH);
+
+#define ROL32(a,amt) ((a >> (-amt & (32-1))) | (a << (amt & (32-1))))
+
+uint32_t src = RS1;
+uint32_t result = src ^ ROL32(src, 9) ^ ROL32(src, 17);
+
+WRITE_RD(
+  sext32(result)
+);
+
+#undef ROL32
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sm3p1.h b/vendor/riscv-isa-sim/riscv/insns/sm3p1.h
new file mode 100644
index 00000000..ce3e36c7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sm3p1.h
@@ -0,0 +1,14 @@
+
+require_extension(EXT_ZKSH);
+
+#define ROL32(a,amt) ((a >> (-amt & (32-1))) | (a << (amt & (32-1))))
+
+uint32_t src = RS1;
+uint32_t result = src ^ ROL32(src, 15) ^ ROL32(src, 23);
+
+WRITE_RD(
+  sext32(result)
+);
+
+#undef ROL32
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sm4_common.h b/vendor/riscv-isa-sim/riscv/insns/sm4_common.h
new file mode 100644
index 00000000..17f129f0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sm4_common.h
@@ -0,0 +1,27 @@
+
+// SM4 forward SBox. SM4 has no inverse sbox.
+static const uint8_t sm4_sbox[256] = {
+  0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2,
+  0x28, 0xFB, 0x2C, 0x05, 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3,
+  0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, 0x9C, 0x42, 0x50, 0xF4,
+  0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62,
+  0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA,
+  0x75, 0x8F, 0x3F, 0xA6, 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA,
+  0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8, 0x68, 0x6B, 0x81, 0xB2,
+  0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35,
+  0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B,
+  0x01, 0x21, 0x78, 0x87, 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52,
+  0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E, 0xEA, 0xBF, 0x8A, 0xD2,
+  0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1,
+  0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30,
+  0xF5, 0x8C, 0xB1, 0xE3, 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60,
+  0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F, 0xD5, 0xDB, 0x37, 0x45,
+  0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51,
+  0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41,
+  0x1F, 0x10, 0x5A, 0xD8, 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD,
+  0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, 0x89, 0x69, 0x97, 0x4A,
+  0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84,
+  0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E,
+  0xD7, 0xCB, 0x39, 0x48
+};
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/sm4ed.h b/vendor/riscv-isa-sim/riscv/insns/sm4ed.h
new file mode 100644
index 00000000..a78c1a87
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/sm4ed.h
@@ -0,0 +1,22 @@
+
+require_extension(EXT_ZKSED);
+
+#include "sm4_common.h"
+
+uint8_t bs = insn.bs();
+
+uint32_t sb_in = (RS2 >> (8*bs)) & 0xFF;
+uint32_t sb_out = (uint32_t)sm4_sbox[sb_in];
+
+uint32_t linear = sb_out ^ (sb_out << 8) ^
+                  (sb_out << 2) ^
+                  (sb_out << 18) ^
+                  ((sb_out & 0x3f) << 26) ^
+                  ((sb_out & 0xC0) << 10) ;
+
+uint32_t rotl = (linear << (8*bs)) | (linear >> (32-8*bs));
+
+uint32_t result = rotl ^ RS1;
+
+WRITE_RD(sext32(result));
+
+uint32_t result = rotl ^ RS1; + +WRITE_RD(sext32(result)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/smal.h b/vendor/riscv-isa-sim/riscv/insns/smal.h new file mode 100644 index 00000000..09b818d7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smal.h @@ -0,0 +1,11 @@ +require_extension(EXT_ZPSFOPERAND); +sreg_t res = 0; +if (xlen == 32) { + res = RS1_PAIR; + res += sext_xlen(P_SH(RS2, 0) * P_SH(RS2, 1)); + WRITE_RD_PAIR(res); +} else { + res = sext_xlen(P_SH(RS2, 0) * P_SH(RS2, 1)) + + sext_xlen(P_SH(RS2, 2) * P_SH(RS2, 3)) + RS1; + WRITE_RD(res); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/smalbb.h b/vendor/riscv-isa-sim/riscv/insns/smalbb.h new file mode 100644 index 00000000..9a2e7994 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalbb.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 0); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalbt.h b/vendor/riscv-isa-sim/riscv/insns/smalbt.h new file mode 100644 index 00000000..42cf71cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalbt.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalda.h b/vendor/riscv-isa-sim/riscv/insns/smalda.h new file mode 100644 index 00000000..8c067939 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalda.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(16, { + rd += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaldrs.h b/vendor/riscv-isa-sim/riscv/insns/smaldrs.h new file mode 100644 index 00000000..84e17699 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaldrs.h @@ -0,0 +1,7 @@ +P_64_PROFILE_REDUCTION(16, { + if (i & 1) { + rd -= ps1 * ps2; + } else { + rd += ps1 * ps2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalds.h b/vendor/riscv-isa-sim/riscv/insns/smalds.h new file mode 100644 index 00000000..e3cfbd7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalds.h @@ -0,0 +1,7 @@ +P_64_PROFILE_REDUCTION(16, { + if (i & 1) { + rd += ps1 * ps2; + } else { + rd -= ps1 * ps2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaltt.h b/vendor/riscv-isa-sim/riscv/insns/smaltt.h new file mode 100644 index 00000000..1e654a06 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaltt.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += P_SH(ps1, 1) * P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalxda.h b/vendor/riscv-isa-sim/riscv/insns/smalxda.h new file mode 100644 index 00000000..77675ee9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalxda.h @@ -0,0 +1,4 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); + rd += (sreg_t)P_SH(ps1, 1) * (sreg_t)P_SH(ps2, 0); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalxds.h b/vendor/riscv-isa-sim/riscv/insns/smalxds.h new file mode 100644 index 00000000..2ae7eb53 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalxds.h @@ -0,0 +1,4 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 1) * (sreg_t)P_SH(ps2, 0); + rd -= (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaqa.h b/vendor/riscv-isa-sim/riscv/insns/smaqa.h new file mode 100644 index 00000000..83dda84f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaqa.h @@ -0,0 +1,3 @@ +P_REDUCTION_LOOP(32, 8, true, false, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaqa_su.h b/vendor/riscv-isa-sim/riscv/insns/smaqa_su.h new file mode 100644 
index 00000000..4ee0eb78 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaqa_su.h @@ -0,0 +1,3 @@ +P_REDUCTION_SULOOP(32, 8, true, false, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smar64.h b/vendor/riscv-isa-sim/riscv/insns/smar64.h new file mode 100644 index 00000000..5c5da771 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smar64.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smax16.h b/vendor/riscv-isa-sim/riscv/insns/smax16.h new file mode 100644 index 00000000..083d63ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smax16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smax32.h b/vendor/riscv-isa-sim/riscv/insns/smax32.h new file mode 100644 index 00000000..6563cfc1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smax32.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smax8.h b/vendor/riscv-isa-sim/riscv/insns/smax8.h new file mode 100644 index 00000000..773039e3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smax8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smbb16.h b/vendor/riscv-isa-sim/riscv/insns/smbb16.h new file mode 100644 index 00000000..0813bfbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smbb16.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = P_SH(ps1, 0) * P_SH(ps2, 0); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smbt16.h b/vendor/riscv-isa-sim/riscv/insns/smbt16.h new file mode 100644 index 00000000..953b3a62 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smbt16.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = P_SH(ps1, 0) * P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smbt32.h b/vendor/riscv-isa-sim/riscv/insns/smbt32.h new file mode 100644 index 00000000..35059ad3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smbt32.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZPN); +WRITE_RD((sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/smdrs.h b/vendor/riscv-isa-sim/riscv/insns/smdrs.h new file mode 100644 index 00000000..8f47f7d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smdrs.h @@ -0,0 +1,6 @@ +P_REDUCTION_LOOP(32, 16, false, false, { + if (j & 1) + pd_res -= ps1 * ps2; + else + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smdrs32.h b/vendor/riscv-isa-sim/riscv/insns/smdrs32.h new file mode 100644 index 00000000..c397013d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smdrs32.h @@ -0,0 +1,7 @@ +require_rv64; +require_extension(EXT_ZPN); + +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD(mres0 - mres1); diff --git a/vendor/riscv-isa-sim/riscv/insns/smds.h b/vendor/riscv-isa-sim/riscv/insns/smds.h new file mode 100644 index 00000000..248679a5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smds.h @@ -0,0 +1,6 @@ +P_REDUCTION_LOOP(32, 16, false, false, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smds32.h b/vendor/riscv-isa-sim/riscv/insns/smds32.h new file mode 100644 index 00000000..e7fdeedd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smds32.h @@ -0,0 +1,7 @@ +require_rv64; +require_extension(EXT_ZPN); + +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = 
(sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD(mres1 - mres0); diff --git a/vendor/riscv-isa-sim/riscv/insns/smin16.h b/vendor/riscv-isa-sim/riscv/insns/smin16.h new file mode 100644 index 00000000..afb1bb3b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smin16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smin32.h b/vendor/riscv-isa-sim/riscv/insns/smin32.h new file mode 100644 index 00000000..22847cb4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smin32.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smin8.h b/vendor/riscv-isa-sim/riscv/insns/smin8.h new file mode 100644 index 00000000..084e0e66 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smin8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmul.h b/vendor/riscv-isa-sim/riscv/insns/smmul.h new file mode 100644 index 00000000..df0dd239 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmul.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + pd = mres >> 32; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmul_u.h b/vendor/riscv-isa-sim/riscv/insns/smmul_u.h new file mode 100644 index 00000000..55fa617f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmul_u.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + pd = ((mres >> 31) + 1) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwb.h b/vendor/riscv-isa-sim/riscv/insns/smmwb.h new file mode 100644 index 00000000..f94aa9c0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwb.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 0); + pd = mres >> 16; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwb_u.h b/vendor/riscv-isa-sim/riscv/insns/smmwb_u.h new file mode 100644 index 00000000..47c6e362 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwb_u.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 0); + pd = ((mres >> 15) + 1) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwt.h b/vendor/riscv-isa-sim/riscv/insns/smmwt.h new file mode 100644 index 00000000..d8cf439f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwt.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 1); + pd = mres >> 16; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwt_u.h b/vendor/riscv-isa-sim/riscv/insns/smmwt_u.h new file mode 100644 index 00000000..5c5a671f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwt_u.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 1); + pd = ((mres >> 15) + 1) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smslda.h b/vendor/riscv-isa-sim/riscv/insns/smslda.h new file mode 100644 index 00000000..f8389609 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smslda.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(16, { + rd -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smslxda.h b/vendor/riscv-isa-sim/riscv/insns/smslxda.h new file mode 100644 index 00000000..7e25f9b8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smslxda.h @@ -0,0 +1,4 @@ +P_64_PROFILE_REDUCTION(32, { + rd -= (sreg_t)P_SH(ps1, 1) * (sreg_t)P_SH(ps2, 0); + rd -= (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smsr64.h b/vendor/riscv-isa-sim/riscv/insns/smsr64.h new file mode 
100644 index 00000000..a43559fd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smsr64.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smtt16.h b/vendor/riscv-isa-sim/riscv/insns/smtt16.h new file mode 100644 index 00000000..e19c50a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smtt16.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = P_SH(ps1, 1) * P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smtt32.h b/vendor/riscv-isa-sim/riscv/insns/smtt32.h new file mode 100644 index 00000000..c7fd9e71 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smtt32.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZPN); +WRITE_RD((sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/smul16.h b/vendor/riscv-isa-sim/riscv/insns/smul16.h new file mode 100644 index 00000000..8f87612d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smul16.h @@ -0,0 +1,3 @@ +P_MUL_LOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smul8.h b/vendor/riscv-isa-sim/riscv/insns/smul8.h new file mode 100644 index 00000000..155e50e0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smul8.h @@ -0,0 +1,3 @@ +P_MUL_LOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smulx16.h b/vendor/riscv-isa-sim/riscv/insns/smulx16.h new file mode 100644 index 00000000..14ae047f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smulx16.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_LOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smulx8.h b/vendor/riscv-isa-sim/riscv/insns/smulx8.h new file mode 100644 index 00000000..b5ae41ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smulx8.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_LOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smxds.h b/vendor/riscv-isa-sim/riscv/insns/smxds.h new file mode 100644 index 00000000..845d01f6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smxds.h @@ -0,0 +1,6 @@ +P_REDUCTION_CROSS_LOOP(32, 16, false, false, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smxds32.h b/vendor/riscv-isa-sim/riscv/insns/smxds32.h new file mode 100644 index 00000000..8eeedcf9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smxds32.h @@ -0,0 +1,7 @@ +require_rv64; +require_extension(EXT_ZPN); + +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD(mres1 - mres0); diff --git a/vendor/riscv-isa-sim/riscv/insns/sra.h b/vendor/riscv-isa-sim/riscv/insns/sra.h new file mode 100644 index 00000000..403b9b73 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(sext_xlen(RS1) >> (RS2 & (xlen-1)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sra16.h b/vendor/riscv-isa-sim/riscv/insns/sra16.h new file mode 100644 index 00000000..84a40fb5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra16.h @@ -0,0 +1,3 @@ +P_X_LOOP(16, 4, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra16_u.h b/vendor/riscv-isa-sim/riscv/insns/sra16_u.h new file mode 100644 index 00000000..c28178e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra16_u.h @@ -0,0 +1,6 @@ +P_X_LOOP(16, 4, { + if(sa > 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra32.h b/vendor/riscv-isa-sim/riscv/insns/sra32.h new file mode 100644 index 00000000..8b192e0c 
--- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra32.h @@ -0,0 +1,4 @@ +require_rv64; +P_X_LOOP(32, 5, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra32_u.h b/vendor/riscv-isa-sim/riscv/insns/sra32_u.h new file mode 100644 index 00000000..e062a886 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra32_u.h @@ -0,0 +1,7 @@ +require_rv64; +P_X_LOOP(32, 5, { + if(sa > 0) + pd = (((uint64_t)(ps1 >> (sa - 1))) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra8.h b/vendor/riscv-isa-sim/riscv/insns/sra8.h new file mode 100644 index 00000000..de1bd64e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra8.h @@ -0,0 +1,3 @@ +P_X_LOOP(8, 3, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra8_u.h b/vendor/riscv-isa-sim/riscv/insns/sra8_u.h new file mode 100644 index 00000000..7061fc48 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra8_u.h @@ -0,0 +1,6 @@ +P_X_LOOP(8, 3, { + if(sa > 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra_u.h b/vendor/riscv-isa-sim/riscv/insns/sra_u.h new file mode 100644 index 00000000..d7c395b0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra_u.h @@ -0,0 +1,9 @@ +require_extension(EXT_ZPN); +sreg_t rs1 = sext_xlen(RS1); +reg_t sa = get_field(RS2, make_mask64(0, xlen == 32 ? 5 : 6)); + +if (sa > 0) { + WRITE_RD(sext_xlen(((rs1 >> (sa - 1)) + 1) >> 1)); +} else { + WRITE_RD(sext_xlen(rs1)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/srai.h b/vendor/riscv-isa-sim/riscv/insns/srai.h new file mode 100644 index 00000000..7ae1d4e5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai.h @@ -0,0 +1,2 @@ +require(SHAMT < xlen); +WRITE_RD(sext_xlen(sext_xlen(RS1) >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/srai16.h b/vendor/riscv-isa-sim/riscv/insns/srai16.h new file mode 100644 index 00000000..63f98073 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai16.h @@ -0,0 +1,3 @@ +P_I_LOOP(16, 4, { + pd = ps1 >> imm4u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai16_u.h b/vendor/riscv-isa-sim/riscv/insns/srai16_u.h new file mode 100644 index 00000000..d7835817 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai16_u.h @@ -0,0 +1,6 @@ +P_I_LOOP(16, 4, { + if (imm4u > 0) + pd = ((ps1 >> (imm4u - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai32.h b/vendor/riscv-isa-sim/riscv/insns/srai32.h new file mode 100644 index 00000000..9058ba9d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai32.h @@ -0,0 +1,4 @@ +require_rv64; +P_I_LOOP(32, 5, { + pd = ps1 >> imm5u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai32_u.h b/vendor/riscv-isa-sim/riscv/insns/srai32_u.h new file mode 100644 index 00000000..a5fe4d3d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai32_u.h @@ -0,0 +1,7 @@ +require_rv64; +P_I_LOOP(32, 5, { + if (imm5u > 0) + pd = (((uint64_t)(ps1 >> (imm5u - 1))) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai8.h b/vendor/riscv-isa-sim/riscv/insns/srai8.h new file mode 100644 index 00000000..0141933e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai8.h @@ -0,0 +1,3 @@ +P_I_LOOP(8, 3, { + pd = ps1 >> imm3u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai8_u.h b/vendor/riscv-isa-sim/riscv/insns/srai8_u.h new file mode 100644 index 00000000..be7bfaf7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai8_u.h @@ -0,0 +1,6 @@ +P_I_LOOP(8, 3, { + if (imm3u > 0) + pd = 
((ps1 >> (imm3u - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai_u.h b/vendor/riscv-isa-sim/riscv/insns/srai_u.h new file mode 100644 index 00000000..f170083e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai_u.h @@ -0,0 +1,10 @@ +require_extension(EXT_ZPN); +sreg_t rs1 = sext_xlen(RS1); +reg_t sa = insn.p_imm6(); +require(sa < (unsigned long)xlen); // imm[5] == 1 is illegal on rv32 + +if (sa > 0) { + WRITE_RD(sext_xlen(((rs1 >> (sa - 1)) + 1) >> 1)); +} else { + WRITE_RD(sext_xlen(rs1)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/sraiw.h b/vendor/riscv-isa-sim/riscv/insns/sraiw.h new file mode 100644 index 00000000..b344459b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sraiw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(int32_t(RS1) >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sraiw_u.h b/vendor/riscv-isa-sim/riscv/insns/sraiw_u.h new file mode 100644 index 00000000..3559d7fa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sraiw_u.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_ZPN); + +reg_t sa = insn.p_imm5(); +if (sa != 0) { + WRITE_RD(sext32(((P_SW(RS1, 0) >> (sa - 1)) + 1) >> 1)); +} else { + WRITE_RD(sext32(P_SW(RS1, 0))); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/sraw.h b/vendor/riscv-isa-sim/riscv/insns/sraw.h new file mode 100644 index 00000000..ca9c0c76 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sraw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(int32_t(RS1) >> (RS2 & 0x1F))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sret.h b/vendor/riscv-isa-sim/riscv/insns/sret.h new file mode 100644 index 00000000..5102c15c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sret.h @@ -0,0 +1,27 @@ +require_extension('S'); +reg_t prev_hstatus = STATE.hstatus->read(); +if (STATE.v) { + if (STATE.prv == PRV_U || get_field(prev_hstatus, HSTATUS_VTSR)) + require_novirt(); +} else { + require_privilege(get_field(STATE.mstatus->read(), MSTATUS_TSR) ? 
PRV_M : PRV_S); +} +reg_t next_pc = p->get_state()->sepc->read(); +set_pc_and_serialize(next_pc); +reg_t s = STATE.sstatus->read(); +reg_t prev_prv = get_field(s, MSTATUS_SPP); +s = set_field(s, MSTATUS_SIE, get_field(s, MSTATUS_SPIE)); +s = set_field(s, MSTATUS_SPIE, 1); +s = set_field(s, MSTATUS_SPP, PRV_U); +STATE.sstatus->write(s); +p->set_privilege(prev_prv); +if (!STATE.v) { + if (p->extension_enabled('H')) { + reg_t prev_virt = get_field(prev_hstatus, HSTATUS_SPV); + p->set_virt(prev_virt); + reg_t new_hstatus = set_field(prev_hstatus, HSTATUS_SPV, 0); + STATE.hstatus->write(new_hstatus); + } + + STATE.mstatus->write(set_field(STATE.mstatus->read(), MSTATUS_MPRV, 0)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/srl.h b/vendor/riscv-isa-sim/riscv/insns/srl.h new file mode 100644 index 00000000..0dabe9ec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(zext_xlen(RS1) >> (RS2 & (xlen-1)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/srl16.h b/vendor/riscv-isa-sim/riscv/insns/srl16.h new file mode 100644 index 00000000..35f9cecb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl16.h @@ -0,0 +1,3 @@ +P_X_ULOOP(16, 4, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl16_u.h b/vendor/riscv-isa-sim/riscv/insns/srl16_u.h new file mode 100644 index 00000000..17d1bc00 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl16_u.h @@ -0,0 +1,7 @@ +P_X_ULOOP(16, 4, { + if (sa > 0) { + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl32.h b/vendor/riscv-isa-sim/riscv/insns/srl32.h new file mode 100644 index 00000000..2ad116cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl32.h @@ -0,0 +1,4 @@ +require_rv64; +P_X_ULOOP(32, 5, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl32_u.h b/vendor/riscv-isa-sim/riscv/insns/srl32_u.h new file mode 100644 index 00000000..d6375469 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl32_u.h @@ -0,0 +1,8 @@ +require_rv64; +P_X_ULOOP(32, 5, { + if (sa > 0) { + pd = (((uint64_t)(ps1 >> (sa - 1))) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl8.h b/vendor/riscv-isa-sim/riscv/insns/srl8.h new file mode 100644 index 00000000..f7d74a94 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl8.h @@ -0,0 +1,3 @@ +P_X_ULOOP(8, 3, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl8_u.h b/vendor/riscv-isa-sim/riscv/insns/srl8_u.h new file mode 100644 index 00000000..26415a56 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl8_u.h @@ -0,0 +1,7 @@ +P_X_ULOOP(8, 3, { + if (sa > 0) { + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli.h b/vendor/riscv-isa-sim/riscv/insns/srli.h new file mode 100644 index 00000000..ea0b40d6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli.h @@ -0,0 +1,2 @@ +require(SHAMT < xlen); +WRITE_RD(sext_xlen(zext_xlen(RS1) >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/srli16.h b/vendor/riscv-isa-sim/riscv/insns/srli16.h new file mode 100644 index 00000000..cbd685ff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli16.h @@ -0,0 +1,3 @@ +P_I_ULOOP(16, 4, { + pd = ps1 >> imm4u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli16_u.h b/vendor/riscv-isa-sim/riscv/insns/srli16_u.h new file mode 100644 index 00000000..2ba533a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli16_u.h @@ -0,0 +1,7 @@ 
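+// The `_u` suffix selects rounding rather than truncation: for a shift
+// amount n > 0, ((x >> (n-1)) + 1) >> 1 equals floor((x + 2^(n-1)) / 2^n),
+// i.e. a round-half-up right shift computed without forming x + 2^(n-1)
+// at full width. Worked example: x = 6, n = 2 gives ((6>>1)+1)>>1 = 2,
+// where the truncating srli16 would give 6 >> 2 = 1. The same idiom
+// recurs in the other rounding shifts (sra*_u, srl*_u, srai*_u).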
+P_I_ULOOP(16, 4, { + if (imm4u > 0) { + pd = ((ps1 >> (imm4u - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli32.h b/vendor/riscv-isa-sim/riscv/insns/srli32.h new file mode 100644 index 00000000..f3d53af4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli32.h @@ -0,0 +1,4 @@ +require_rv64; +P_I_ULOOP(32, 5, { + pd = ps1 >> imm5u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli32_u.h b/vendor/riscv-isa-sim/riscv/insns/srli32_u.h new file mode 100644 index 00000000..6d2327fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli32_u.h @@ -0,0 +1,8 @@ +require_rv64; +P_I_ULOOP(32, 5, { + if (imm5u > 0) { + pd = (((uint64_t)(ps1 >> (imm5u - 1))) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli8.h b/vendor/riscv-isa-sim/riscv/insns/srli8.h new file mode 100644 index 00000000..103f0ed2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli8.h @@ -0,0 +1,3 @@ +P_I_ULOOP(8, 3, { + pd = ps1 >> imm3u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli8_u.h b/vendor/riscv-isa-sim/riscv/insns/srli8_u.h new file mode 100644 index 00000000..9fa7f8c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli8_u.h @@ -0,0 +1,7 @@ +P_I_ULOOP(8, 3, { + if (imm3u > 0) { + pd = ((ps1 >> (imm3u - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srliw.h b/vendor/riscv-isa-sim/riscv/insns/srliw.h new file mode 100644 index 00000000..c657d3da --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srliw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32((uint32_t)RS1 >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/srlw.h b/vendor/riscv-isa-sim/riscv/insns/srlw.h new file mode 100644 index 00000000..a8eb4519 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srlw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32((uint32_t)RS1 >> (RS2 & 0x1F))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sro.h b/vendor/riscv-isa-sim/riscv/insns/sro.h new file mode 100644 index 00000000..3ac050da --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sro.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(~((zext_xlen(~RS1)) >> (RS2 & (xlen-1))))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sroi.h b/vendor/riscv-isa-sim/riscv/insns/sroi.h new file mode 100644 index 00000000..e8788928 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sroi.h @@ -0,0 +1,3 @@ +require(SHAMT < xlen); +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(~((zext_xlen(~RS1)) >> SHAMT))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sroiw.h b/vendor/riscv-isa-sim/riscv/insns/sroiw.h new file mode 100644 index 00000000..83480705 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sroiw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(sext32(~((~(uint32_t)RS1) >> SHAMT))); diff --git a/vendor/riscv-isa-sim/riscv/insns/srow.h b/vendor/riscv-isa-sim/riscv/insns/srow.h new file mode 100644 index 00000000..808af8db --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srow.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(sext32(~((~(uint32_t)RS1) >> (RS2 & 0x1F)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/stas16.h b/vendor/riscv-isa-sim/riscv/insns/stas16.h new file mode 100644 index 00000000..949e5c85 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stas16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_LOOP(16, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/stas32.h 
b/vendor/riscv-isa-sim/riscv/insns/stas32.h new file mode 100644 index 00000000..2009a693 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stas32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_LOOP(32, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/stsa16.h b/vendor/riscv-isa-sim/riscv/insns/stsa16.h new file mode 100644 index 00000000..7e4371d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stsa16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_LOOP(16, { + pd = ps1 - ps2; +}, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/stsa32.h b/vendor/riscv-isa-sim/riscv/insns/stsa32.h new file mode 100644 index 00000000..e2d81b70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_LOOP(32, { + pd = ps1 - ps2; +}, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub.h b/vendor/riscv-isa-sim/riscv/insns/sub.h new file mode 100644 index 00000000..9ed48f74 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 - RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sub16.h b/vendor/riscv-isa-sim/riscv/insns/sub16.h new file mode 100644 index 00000000..5d36aaf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub32.h b/vendor/riscv-isa-sim/riscv/insns/sub32.h new file mode 100644 index 00000000..70bbc53c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub32.h @@ -0,0 +1,4 @@ +require_rv64; +P_LOOP(32, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub64.h b/vendor/riscv-isa-sim/riscv/insns/sub64.h new file mode 100644 index 00000000..d9775264 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub64.h @@ -0,0 +1,3 @@ +P_64_PROFILE({ + rd = rs1 - rs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub8.h b/vendor/riscv-isa-sim/riscv/insns/sub8.h new file mode 100644 index 00000000..7f13d615 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/subw.h b/vendor/riscv-isa-sim/riscv/insns/subw.h new file mode 100644 index 00000000..b4168efe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/subw.h @@ -0,0 +1,3 @@ +require_rv64; +WRITE_RD(sext32(RS1 - RS2)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd810.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd810.h new file mode 100644 index 00000000..2a2f7c32 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd810.h @@ -0,0 +1 @@ +P_SUNPKD8(1, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd820.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd820.h new file mode 100644 index 00000000..84d5248b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd820.h @@ -0,0 +1 @@ +P_SUNPKD8(2, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd830.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd830.h new file mode 100644 index 00000000..88179075 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd830.h @@ -0,0 +1 @@ +P_SUNPKD8(3, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd831.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd831.h new file mode 100644 index 00000000..98ed748d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd831.h @@ -0,0 +1 @@ +P_SUNPKD8(3, 1) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd832.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd832.h new file mode 100644 index 00000000..b0ac29f9 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/sunpkd832.h @@ -0,0 +1 @@ +P_SUNPKD8(3, 2) diff --git a/vendor/riscv-isa-sim/riscv/insns/sw.h b/vendor/riscv-isa-sim/riscv/insns/sw.h new file mode 100644 index 00000000..aa5ead37 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sw.h @@ -0,0 +1 @@ +MMU.store_uint32(RS1 + insn.s_imm(), RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/uclip16.h b/vendor/riscv-isa-sim/riscv/insns/uclip16.h new file mode 100644 index 00000000..4cc25190 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uclip16.h @@ -0,0 +1,13 @@ +require_vector_vs; +P_I_LOOP(16, 4, { + int64_t uint_max = imm4u ? UINT64_MAX >> (64 - imm4u) : 0; + pd = ps1; + + if (ps1 > uint_max) { + pd = uint_max; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = 0; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uclip32.h b/vendor/riscv-isa-sim/riscv/insns/uclip32.h new file mode 100644 index 00000000..d347650c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uclip32.h @@ -0,0 +1,13 @@ +require_vector_vs; +P_I_LOOP(32, 5, { + int64_t uint_max = imm5u ? UINT64_MAX >> (64 - imm5u) : 0; + pd = ps1; + + if (ps1 > uint_max) { + pd = uint_max; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = 0; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uclip8.h b/vendor/riscv-isa-sim/riscv/insns/uclip8.h new file mode 100644 index 00000000..b8a95c07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uclip8.h @@ -0,0 +1,13 @@ +require_vector_vs; +P_I_LOOP(8, 3, { + int64_t uint_max = imm3u ? UINT64_MAX >> (64 - imm3u) : 0; + pd = ps1; + + if (ps1 > uint_max) { + pd = uint_max; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = 0; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmple16.h b/vendor/riscv-isa-sim/riscv/insns/ucmple16.h new file mode 100644 index 00000000..fe2b93fb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmple16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 <= ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmple8.h b/vendor/riscv-isa-sim/riscv/insns/ucmple8.h new file mode 100644 index 00000000..bd44cb0e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmple8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 <= ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmplt16.h b/vendor/riscv-isa-sim/riscv/insns/ucmplt16.h new file mode 100644 index 00000000..fa7512c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmplt16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 < ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmplt8.h b/vendor/riscv-isa-sim/riscv/insns/ucmplt8.h new file mode 100644 index 00000000..6fa85b1a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmplt8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 < ps2) ? 
-1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd16.h b/vendor/riscv-isa-sim/riscv/insns/ukadd16.h new file mode 100644 index 00000000..680b5b63 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(16, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd32.h b/vendor/riscv-isa-sim/riscv/insns/ukadd32.h new file mode 100644 index 00000000..dd836c1e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_ULOOP(32, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd64.h b/vendor/riscv-isa-sim/riscv/insns/ukadd64.h new file mode 100644 index 00000000..d7e98f34 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE({ + bool sat = false; + rd = (sat_addu(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd8.h b/vendor/riscv-isa-sim/riscv/insns/ukadd8.h new file mode 100644 index 00000000..4bcada20 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(8, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukaddh.h b/vendor/riscv-isa-sim/riscv/insns/ukaddh.h new file mode 100644 index 00000000..30c970a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukaddh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_H(RS1, 0) + (sreg_t)P_H(RS2, 0); +P_SATU(res, 16); +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ukaddw.h b/vendor/riscv-isa-sim/riscv/insns/ukaddw.h new file mode 100644 index 00000000..5d4d91ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukaddw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_W(RS1, 0) + (sreg_t)P_W(RS2, 0); +P_SATU(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcras16.h b/vendor/riscv-isa-sim/riscv/insns/ukcras16.h new file mode 100644 index 00000000..54b9a104 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcras16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcras32.h b/vendor/riscv-isa-sim/riscv/insns/ukcras32.h new file mode 100644 index 00000000..001644c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcras32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h new file mode 100644 index 00000000..343063ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h new file mode 100644 index 
00000000..260f181e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukmar64.h b/vendor/riscv-isa-sim/riscv/insns/ukmar64.h new file mode 100644 index 00000000..e33ad7d8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukmar64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE_REDUCTION(32, { + bool sat = false; + rd = (sat_addu(rd, ps1 * ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukmsr64.h b/vendor/riscv-isa-sim/riscv/insns/ukmsr64.h new file mode 100644 index 00000000..9a38cb07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukmsr64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE_REDUCTION(32, { + bool sat = false; + rd = (sat_subu(rd, ps1 * ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstas16.h b/vendor/riscv-isa-sim/riscv/insns/ukstas16.h new file mode 100644 index 00000000..a8203e43 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstas16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_STRAIGHT_ULOOP(16, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstas32.h b/vendor/riscv-isa-sim/riscv/insns/ukstas32.h new file mode 100644 index 00000000..c734eb6c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstas32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_STRAIGHT_ULOOP(32, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstsa16.h b/vendor/riscv-isa-sim/riscv/insns/ukstsa16.h new file mode 100644 index 00000000..81451112 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_STRAIGHT_ULOOP(16, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstsa32.h b/vendor/riscv-isa-sim/riscv/insns/ukstsa32.h new file mode 100644 index 00000000..9eb713ec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_STRAIGHT_ULOOP(32, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub16.h b/vendor/riscv-isa-sim/riscv/insns/uksub16.h new file mode 100644 index 00000000..7fba16e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(16, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub32.h b/vendor/riscv-isa-sim/riscv/insns/uksub32.h new file mode 100644 index 00000000..3d4913bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_ULOOP(32, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub64.h b/vendor/riscv-isa-sim/riscv/insns/uksub64.h new file 
mode 100644 index 00000000..0d2bb050 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE({ + bool sat = false; + rd = (sat_subu(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub8.h b/vendor/riscv-isa-sim/riscv/insns/uksub8.h new file mode 100644 index 00000000..f26621e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(8, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksubh.h b/vendor/riscv-isa-sim/riscv/insns/uksubh.h new file mode 100644 index 00000000..ac239895 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksubh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_H(RS1, 0) - (sreg_t)P_H(RS2, 0); +P_SATU(res, 16); +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/uksubw.h b/vendor/riscv-isa-sim/riscv/insns/uksubw.h new file mode 100644 index 00000000..41a32e70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksubw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_W(RS1, 0) - (sreg_t)P_W(RS2, 0); +P_SATU(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/umaqa.h b/vendor/riscv-isa-sim/riscv/insns/umaqa.h new file mode 100644 index 00000000..474b174a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umaqa.h @@ -0,0 +1,3 @@ +P_REDUCTION_ULOOP(32, 8, true, false, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umar64.h b/vendor/riscv-isa-sim/riscv/insns/umar64.h new file mode 100644 index 00000000..0a8a3524 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umar64.h @@ -0,0 +1,3 @@ +P_64_UPROFILE_REDUCTION(32, { + rd += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umax16.h b/vendor/riscv-isa-sim/riscv/insns/umax16.h new file mode 100644 index 00000000..e60ed4ac --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umax16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umax32.h b/vendor/riscv-isa-sim/riscv/insns/umax32.h new file mode 100644 index 00000000..6156345c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umax32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umax8.h b/vendor/riscv-isa-sim/riscv/insns/umax8.h new file mode 100644 index 00000000..8a575a68 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umax8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umin16.h b/vendor/riscv-isa-sim/riscv/insns/umin16.h new file mode 100644 index 00000000..d4142118 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umin16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umin32.h b/vendor/riscv-isa-sim/riscv/insns/umin32.h new file mode 100644 index 00000000..96699452 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umin32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umin8.h b/vendor/riscv-isa-sim/riscv/insns/umin8.h new file mode 100644 index 00000000..bea8ccd2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umin8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 < ps2) ? 
ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umsr64.h b/vendor/riscv-isa-sim/riscv/insns/umsr64.h new file mode 100644 index 00000000..0e186d96 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umsr64.h @@ -0,0 +1,3 @@ +P_64_UPROFILE_REDUCTION(32, { + rd -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umul16.h b/vendor/riscv-isa-sim/riscv/insns/umul16.h new file mode 100644 index 00000000..860f9420 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umul16.h @@ -0,0 +1,3 @@ +P_MUL_ULOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umul8.h b/vendor/riscv-isa-sim/riscv/insns/umul8.h new file mode 100644 index 00000000..04d7a6ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umul8.h @@ -0,0 +1,3 @@ +P_MUL_ULOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umulx16.h b/vendor/riscv-isa-sim/riscv/insns/umulx16.h new file mode 100644 index 00000000..5abe9cf8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umulx16.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_ULOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umulx8.h b/vendor/riscv-isa-sim/riscv/insns/umulx8.h new file mode 100644 index 00000000..a2b073de --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umulx8.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_ULOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/unshfl.h b/vendor/riscv-isa-sim/riscv/insns/unshfl.h new file mode 100644 index 00000000..78990b87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/unshfl.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & ((xlen-1) >> 1); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/unshfli.h b/vendor/riscv-isa-sim/riscv/insns/unshfli.h new file mode 100644 index 00000000..26920f14 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/unshfli.h @@ -0,0 +1,12 @@ +// Zbkb contains unzip but not general unshfli +require(((insn.rs2() == (xlen / 2 - 1)) && p->extension_enabled(EXT_ZBKB)) + || p->extension_enabled(EXT_XZBP)); +require(SHAMT < (xlen/2)); +reg_t x = RS1; +int shamt = SHAMT & ((xlen-1) >> 1); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/unshflw.h b/vendor/riscv-isa-sim/riscv/insns/unshflw.h new file mode 100644 index 00000000..776534e7 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/unshflw.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 15; +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd16.h b/vendor/riscv-isa-sim/riscv/insns/uradd16.h new file mode 100644 index 00000000..29610bf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd32.h b/vendor/riscv-isa-sim/riscv/insns/uradd32.h new file mode 100644 index 00000000..4f791d91 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd64.h b/vendor/riscv-isa-sim/riscv/insns/uradd64.h new file mode 100644 index 00000000..f6787e16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd64.h @@ -0,0 +1,9 @@ +P_64_UPROFILE({ + rd = rs1 + rs2; + if (rd < rs1) { + rd >>= 1; + rd |= ((reg_t)1 << 63); + } else { + rd >>= 1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd8.h b/vendor/riscv-isa-sim/riscv/insns/uradd8.h new file mode 100644 index 00000000..412e3d59 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uraddw.h b/vendor/riscv-isa-sim/riscv/insns/uraddw.h new file mode 100644 index 00000000..6a9455fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uraddw.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +reg_t res = (reg_t)P_W(RS1, 0) + (reg_t)P_W(RS2, 0); +res >>= 1; +WRITE_RD(sext_xlen((int32_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/urcras16.h b/vendor/riscv-isa-sim/riscv/insns/urcras16.h new file mode 100644 index 00000000..2bac3156 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcras16.h @@ -0,0 +1,5 @@ +P_CROSS_ULOOP(16, { + pd = (ps1 + ps2) >> 1; +}, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urcras32.h b/vendor/riscv-isa-sim/riscv/insns/urcras32.h new file mode 100644 index 00000000..a08e8777 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcras32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_ULOOP(32, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/urcrsa16.h new file mode 100644 index 00000000..a890990a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcrsa16.h @@ -0,0 +1,5 @@ +P_CROSS_ULOOP(16, { + pd = (ps1 - ps2) >> 1; +}, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/urcrsa32.h new file mode 100644 index 00000000..7ddd4f95 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcrsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_ULOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstas16.h 
b/vendor/riscv-isa-sim/riscv/insns/urstas16.h new file mode 100644 index 00000000..3cc89d81 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstas16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_ULOOP(16, { + pd = (ps1 + ps2) >> 1; +}, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstas32.h b/vendor/riscv-isa-sim/riscv/insns/urstas32.h new file mode 100644 index 00000000..668fa66a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstas32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_ULOOP(32, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstsa16.h b/vendor/riscv-isa-sim/riscv/insns/urstsa16.h new file mode 100644 index 00000000..a88bcf13 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstsa16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_ULOOP(16, { + pd = (ps1 - ps2) >> 1; +}, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstsa32.h b/vendor/riscv-isa-sim/riscv/insns/urstsa32.h new file mode 100644 index 00000000..097337ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_ULOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub16.h b/vendor/riscv-isa-sim/riscv/insns/ursub16.h new file mode 100644 index 00000000..bcd09f02 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub32.h b/vendor/riscv-isa-sim/riscv/insns/ursub32.h new file mode 100644 index 00000000..215436d0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub64.h b/vendor/riscv-isa-sim/riscv/insns/ursub64.h new file mode 100644 index 00000000..3d845a0c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub64.h @@ -0,0 +1,9 @@ +P_64_UPROFILE({ + rd = rs1 - rs2; + if (rd > rs1) { + rd >>= 1; + rd |= ((reg_t)1 << 63); + } else { + rd >>= 1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub8.h b/vendor/riscv-isa-sim/riscv/insns/ursub8.h new file mode 100644 index 00000000..d66a9957 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursubw.h b/vendor/riscv-isa-sim/riscv/insns/ursubw.h new file mode 100644 index 00000000..5c90fde4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursubw.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +reg_t res = (reg_t)P_W(RS1, 0) - (reg_t)P_W(RS2, 0); +res >>= 1; +WRITE_RD(sext_xlen((int32_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h new file mode 100644 index 00000000..0e7e39b4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h @@ -0,0 +1,2 @@ +// vaadd.vv vd, vs2, vs1 +VI_VV_LOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h new file mode 100644 index 00000000..120e63eb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h @@ -0,0 +1,2 @@ +// vaadd.vx vd, vs2, rs1 +VI_VX_LOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h new file mode 100644 index 00000000..7eb7a895 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h @@ -0,0 +1,2 @@ +// vaaddu.vv vd, vs2, vs1 +VI_VV_ULOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h new file mode 100644 index 00000000..325206f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h @@ -0,0 +1,2 @@ +// vaaddu.vx vd, vs2, rs1 +VI_VX_ULOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vadc_vim.h b/vendor/riscv-isa-sim/riscv/insns/vadc_vim.h new file mode 100644 index 00000000..4b0356f2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadc_vim.h @@ -0,0 +1,5 @@ +// vadc.vim vd, vs2, simm5, v0 +VI_XI_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & simm5) + (op_mask & vs2) + carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h new file mode 100644 index 00000000..7b41dd9f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h @@ -0,0 +1,5 @@ +// vadc.vvm vd, vs2, rs1, v0 +VI_VV_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & vs1) + (op_mask & vs2) + carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h new file mode 100644 index 00000000..b1f8886d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h @@ -0,0 +1,5 @@ +// vadc.vxm vd, vs2, rs1, v0 +VI_XI_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & rs1) + (op_mask & vs2) + carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadd_vi.h b/vendor/riscv-isa-sim/riscv/insns/vadd_vi.h new file mode 100644 index 00000000..45fc6b74 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadd_vi.h @@ -0,0 +1,5 @@ +// vadd.vi vd, simm5, vs2, vm +VI_VI_LOOP +({ + vd = simm5 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vadd_vv.h new file mode 100644 index 00000000..45c6bdcb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadd_vv.h @@ -0,0 +1,5 @@ +// vadd.vv vd, vs1, vs2, vm +VI_VV_LOOP +({ + vd = vs1 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vadd_vx.h new file mode 100644 index 00000000..33e72ee4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadd_vx.h @@ -0,0 +1,5 @@ +// vadd.vx vd, rs1, vs2, vm +VI_VX_LOOP +({ + vd = rs1 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h new file mode 100644 index 00000000..3cb3db70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h @@ -0,0 +1,2 @@ +//vamoadde.v vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h new file mode 100644 index 00000000..2bd77fcb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h @@ -0,0 +1,2 @@ +//vamoadde.v vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h new file mode 100644 index 00000000..79ca7482 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h @@ -0,0 +1,2 @@ +//vamoadde.v vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h new file mode 100644 index 00000000..06b8c793 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h @@ -0,0 +1,2 @@ +//vamoadde.v 
vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h new file mode 100644 index 00000000..be119497 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h new file mode 100644 index 00000000..71506704 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h new file mode 100644 index 00000000..3efae3b5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h new file mode 100644 index 00000000..c47645d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h new file mode 100644 index 00000000..ca67893e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h new file mode 100644 index 00000000..b6823cd0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h new file mode 100644 index 00000000..46e8a3bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h new file mode 100644 index 00000000..9697b3a4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h new file mode 100644 index 00000000..e05971df --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h new file mode 100644 index 00000000..9b873543 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ?
lhs : vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h new file mode 100644 index 00000000..bbfbc9f2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h new file mode 100644 index 00000000..357ba245 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h new file mode 100644 index 00000000..9d1ecac6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h new file mode 100644 index 00000000..6cb8475e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h new file mode 100644 index 00000000..9ef3d4ee --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h new file mode 100644 index 00000000..5c035ea4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h new file mode 100644 index 00000000..d4a8f892 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h new file mode 100644 index 00000000..16296c5b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h new file mode 100644 index 00000000..fd850fd0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h new file mode 100644 index 00000000..3749d052 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ?
lhs : vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h new file mode 100644 index 00000000..a5ba1caa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h new file mode 100644 index 00000000..94e4458e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h new file mode 100644 index 00000000..84e03944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h new file mode 100644 index 00000000..364035db --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h new file mode 100644 index 00000000..31ff0210 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h new file mode 100644 index 00000000..a5741929 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h new file mode 100644 index 00000000..58bd0352 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h new file mode 100644 index 00000000..af37c8c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h new file mode 100644 index 00000000..61e8c327 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h @@ -0,0 +1,2 @@ +//vamoxore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h new file mode 100644 index 00000000..d48d9515 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h @@ -0,0 +1,2 @@ +//vamoxore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h new file mode 100644 index 00000000..f7a3ca42 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h @@ -0,0 +1,2 @@ +//vamoxore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e64);
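Every vamo*ei*_v.h body above and below instantiates the same VI_AMO pattern: the braced block computes the value written back to memory from the old memory value (lhs) and the vector write-data element (vs3), while vd receives the old value. A minimal stand-alone model of one element's semantics (a sketch with a hypothetical helper; the real macro also performs masking, index scaling via vs2, and access checks):

#include <cstdint>
#include <cstddef>

// One element of an indexed vector AMO (sketch): read the old value,
// store combine(old, vs3), and return the old value for write-back to vd.
template <typename T, typename Combine>
T vamo_element(T *mem, std::size_t idx, T vs3, Combine combine) {
  T lhs = mem[idx];              // old memory value; "lhs" in the bodies above
  mem[idx] = combine(lhs, vs3);  // e.g. lhs + vs3, lhs & vs3, lhs ^ vs3
  return lhs;                    // vd[i] receives the original memory contents
}

diff --git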
a/vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h new file mode 100644 index 00000000..4b6c7982 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h @@ -0,0 +1,2 @@ +//vamoxore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vand_vi.h b/vendor/riscv-isa-sim/riscv/insns/vand_vi.h new file mode 100644 index 00000000..dd9618ba --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vand_vi.h @@ -0,0 +1,5 @@ +// vand.vi vd, simm5, vs2, vm +VI_VI_LOOP +({ + vd = simm5 & vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vand_vv.h b/vendor/riscv-isa-sim/riscv/insns/vand_vv.h new file mode 100644 index 00000000..65558e4b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vand_vv.h @@ -0,0 +1,5 @@ +// vand.vv vd, vs1, vs2, vm +VI_VV_LOOP +({ + vd = vs1 & vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vand_vx.h b/vendor/riscv-isa-sim/riscv/insns/vand_vx.h new file mode 100644 index 00000000..8eea1ed5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vand_vx.h @@ -0,0 +1,5 @@ +// vand.vx vd, rs1, vs2, vm +VI_VX_LOOP +({ + vd = rs1 & vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vasub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vasub_vv.h new file mode 100644 index 00000000..7dfbdfcf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasub_vv.h @@ -0,0 +1,2 @@ +// vasub.vv vd, vs2, vs1 +VI_VV_LOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vasub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vasub_vx.h new file mode 100644 index 00000000..185fa9c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasub_vx.h @@ -0,0 +1,2 @@ +// vasub.vx vd, vs2, rs1 +VI_VX_LOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h new file mode 100644 index 00000000..902fef99 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h @@ -0,0 +1,2 @@ +// vasubu.vv vd, vs2, vs1 +VI_VV_ULOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h new file mode 100644 index 00000000..874dc59e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h @@ -0,0 +1,2 @@ +// vasubu.vx vd, vs2, rs1 +VI_VX_ULOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h b/vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h new file mode 100644 index 00000000..71953459 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h @@ -0,0 +1,33 @@ +// vcompress vd, vs2, vs1 +require(P.VU.vstart->read() == 0); +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require(insn.rd() != insn.rs2()); +require_noover(insn.rd(), P.VU.vflmul, insn.rs1(), 1); + +reg_t pos = 0; + +VI_GENERAL_LOOP_BASE + const int midx = i / 64; + const int mpos = i % 64; + + bool do_mask = (P.VU.elt<uint64_t>(rs1_num, midx) >> mpos) & 0x1; + if (do_mask) { + switch (sew) { + case e8: + P.VU.elt<uint8_t>(rd_num, pos, true) = P.VU.elt<uint8_t>(rs2_num, i); + break; + case e16: + P.VU.elt<uint16_t>(rd_num, pos, true) = P.VU.elt<uint16_t>(rs2_num, i); + break; + case e32: + P.VU.elt<uint32_t>(rd_num, pos, true) = P.VU.elt<uint32_t>(rs2_num, i); + break; + default: + P.VU.elt<uint64_t>(rd_num, pos, true) = P.VU.elt<uint64_t>(rs2_num, i); + break; + } + + ++pos; + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vcpop_m.h b/vendor/riscv-isa-sim/riscv/insns/vcpop_m.h new file mode 100644 index 00000000..cbe45a4b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vcpop_m.h @@ -0,0 +1,23 @@ +// vcpop.m rd, vs2, vm
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); +require(P.VU.vstart->read() == 0); +reg_t popcount = 0; +for (reg_t i=P.VU.vstart->read(); i < vl; ++i) { + const int midx = i / 64; + const int mpos = i % 64; + + bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1; + if (insn.v_vm() == 1) { + popcount += vs2_lsb; + } else { + bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1; + popcount += (vs2_lsb && do_mask); + } +} +P.VU.vstart->write(0); +WRITE_RD(popcount); diff --git a/vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h b/vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h new file mode 100644 index 00000000..0d4bd0d8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h @@ -0,0 +1,10 @@ +// vdiv.vv vd, vs2, vs1 +VI_VV_LOOP +({ + if (vs1 == 0) + vd = -1; + else if (vs2 == (INT64_MIN >> (64 - sew)) && vs1 == -1) + vd = vs2; + else + vd = vs2 / vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h b/vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h new file mode 100644 index 00000000..40529527 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h @@ -0,0 +1,10 @@ +// vdiv.vx vd, vs2, rs1 +VI_VX_LOOP +({ + if (rs1 == 0) + vd = -1; + else if (vs2 == (INT64_MIN >> (64 - sew)) && rs1 == -1) + vd = vs2; + else + vd = vs2 / rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h new file mode 100644 index 00000000..ef6e777d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h @@ -0,0 +1,8 @@ +// vdivu.vv vd, vs2, vs1 +VI_VV_ULOOP +({ + if (vs1 == 0) + vd = -1; + else + vd = vs2 / vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h new file mode 100644 index 00000000..7ffe1c68 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h @@ -0,0 +1,8 @@ +// vdivu.vx vd, vs2, rs1 +VI_VX_ULOOP +({ + if (rs1 == 0) + vd = -1; + else + vd = vs2 / rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h new file mode 100644 index 00000000..2b808e0c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h @@ -0,0 +1,11 @@ +// vfadd.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_add(rs1, vs2); +}, +{ + vd = f32_add(rs1, vs2); +}, +{ + vd = f64_add(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h new file mode 100644 index 00000000..ce94921d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h @@ -0,0 +1,11 @@ +// vfadd.vv vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_add(vs1, vs2); +}, +{ + vd = f32_add(vs1, vs2); +}, +{ + vd = f64_add(vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfclass_v.h b/vendor/riscv-isa-sim/riscv/insns/vfclass_v.h new file mode 100644 index 00000000..a307d2d1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfclass_v.h @@ -0,0 +1,11 @@ +// vfclass.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16(f16_classify(vs2)); +}, +{ + vd = f32(f32_classify(vs2)); +}, +{ + vd = f64(f64_classify(vs2)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h new file mode 100644 index 00000000..d094c140 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h @@ -0,0 +1,7 @@ +// vfcvt.f.x.v vd, vs2, vm +VI_VFP_CVT_INT_TO_FP( + { vd = i32_to_f16(vs2); }, // BODY16 + { vd = i32_to_f32(vs2); }, // BODY32 + { vd = i64_to_f64(vs2); }, // BODY64 + int // sign +)
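The vdiv/vdivu bodies above encode the RISC-V V integer-divide corner cases directly instead of trapping: a zero divisor yields all ones, and the single signed overflow case returns the dividend unchanged (the INT64_MIN >> (64 - sew) term is the most negative value at the current element width). A stand-alone scalar model of the signed case at sew=64:

#include <cstdint>

// Signed element divide with RISC-V V semantics: no trap on the corner
// cases; x/0 gives -1 (all ones) and INT64_MIN / -1 gives INT64_MIN.
int64_t vdiv_element(int64_t vs2, int64_t vs1) {
  if (vs1 == 0)
    return -1;                        // divide by zero: quotient is all ones
  if (vs2 == INT64_MIN && vs1 == -1)
    return vs2;                       // signed overflow: keep the dividend
  return vs2 / vs1;
}

diff --git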
a/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h new file mode 100644 index 00000000..64dbb1c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h @@ -0,0 +1,7 @@ +// vfcvt.f.xu.v vd, vs2, vm +VI_VFP_CVT_INT_TO_FP( + { vd = ui32_to_f16(vs2); }, // BODY16 + { vd = ui32_to_f32(vs2); }, // BODY32 + { vd = ui64_to_f64(vs2); }, // BODY64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h new file mode 100644 index 00000000..ecdfa22d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.rtz.x.f.v vd, vs2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_i16(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_i32(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_i64(vs2, softfloat_round_minMag, true); }, // BODY64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h new file mode 100644 index 00000000..87585d24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.rtz.xu.f.v vd, vs2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_ui16(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_ui64(vs2, softfloat_round_minMag, true); }, // BODY64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h new file mode 100644 index 00000000..4f21b52f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.x.f.v vd, vs2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_i16(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_i32(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_i64(vs2, softfloat_roundingMode, true); }, // BODY64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h new file mode 100644 index 00000000..ba50fff6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.xu.f.v vd, vs2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_ui16(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_ui64(vs2, softfloat_roundingMode, true); }, // BODY64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h new file mode 100644 index 00000000..a703ef02 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h @@ -0,0 +1,11 @@ +// vfdiv.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_div(vs2, rs1); +}, +{ + vd = f32_div(vs2, rs1); +}, +{ + vd = f64_div(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h new file mode 100644 index 00000000..c66d7516 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h @@ -0,0 +1,11 @@ +// vfdiv.vv vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_div(vs2, vs1); +}, +{ + vd = f32_div(vs2, vs1); +}, +{ + vd = f64_div(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfirst_m.h b/vendor/riscv-isa-sim/riscv/insns/vfirst_m.h new file mode 100644 index 00000000..5b768ed4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfirst_m.h @@ -0,0 +1,20 @@ +// vfirst.m rd, vs2, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= 
e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); +require(P.VU.vstart->read() == 0); +reg_t pos = -1; +for (reg_t i=P.VU.vstart->read(); i < vl; ++i) { + VI_LOOP_ELEMENT_SKIP() + + bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1; + if (vs2_lsb) { + pos = i; + break; + } +} +P.VU.vstart->write(0); +WRITE_RD(pos); diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h new file mode 100644 index 00000000..61578d33 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h @@ -0,0 +1,11 @@ +// vfmacc.vf vd, rs1, vs2, vm # vd[i] = +(vs2[i] * x[rs1]) + vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, vs2, vd); +}, +{ + vd = f32_mulAdd(rs1, vs2, vd); +}, +{ + vd = f64_mulAdd(rs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h new file mode 100644 index 00000000..499b1d4d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h @@ -0,0 +1,11 @@ +// vfmacc.vv vd, rs1, vs2, vm # vd[i] = +(vs2[i] * vs1[i]) + vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vs1, vs2, vd); +}, +{ + vd = f32_mulAdd(vs1, vs2, vd); +}, +{ + vd = f64_mulAdd(vs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h new file mode 100644 index 00000000..2a014295 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h @@ -0,0 +1,11 @@ +// vfmadd: vd[i] = +(vd[i] * f[rs1]) + vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(vd, rs1, vs2); +}, +{ + vd = f32_mulAdd(vd, rs1, vs2); +}, +{ + vd = f64_mulAdd(vd, rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h new file mode 100644 index 00000000..7ef734f8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h @@ -0,0 +1,11 @@ +// vfmadd: vd[i] = +(vd[i] * vs1[i]) + vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vd, vs1, vs2); +}, +{ + vd = f32_mulAdd(vd, vs1, vs2); +}, +{ + vd = f64_mulAdd(vd, vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h new file mode 100644 index 00000000..c4b74cbd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h @@ -0,0 +1,11 @@ +// vfmax +VI_VFP_VF_LOOP +({ + vd = f16_max(vs2, rs1); +}, +{ + vd = f32_max(vs2, rs1); +}, +{ + vd = f64_max(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h new file mode 100644 index 00000000..6439c899 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h @@ -0,0 +1,11 @@ +// vfmax +VI_VFP_VV_LOOP +({ + vd = f16_max(vs2, vs1); +}, +{ + vd = f32_max(vs2, vs1); +}, +{ + vd = f64_max(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h b/vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h new file mode 100644 index 00000000..d82dfef5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h @@ -0,0 +1,4 @@ +// vfmerge_vf vd, vs2, vs1, vm +VI_VF_MERGE_LOOP({ + vd = use_first ?
rs1 : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h new file mode 100644 index 00000000..1560cdf7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h @@ -0,0 +1,11 @@ +// vfmin vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_min(vs2, rs1); +}, +{ + vd = f32_min(vs2, rs1); +}, +{ + vd = f64_min(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h new file mode 100644 index 00000000..882a7740 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h @@ -0,0 +1,11 @@ +// vfmin vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_min(vs2, vs1); +}, +{ + vd = f32_min(vs2, vs1); +}, +{ + vd = f64_min(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h new file mode 100644 index 00000000..8af397b9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h @@ -0,0 +1,11 @@ +// vfmsac: vd[i] = +(f[rs1] * vs2[i]) - vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, vs2, f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(rs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(rs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h new file mode 100644 index 00000000..3bb50e50 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h @@ -0,0 +1,11 @@ +// vfmsac: vd[i] = +(vs1[i] * vs2[i]) - vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vs1, vs2, f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(vs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h new file mode 100644 index 00000000..ab77b4c6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h @@ -0,0 +1,11 @@ +// vfmsub: vd[i] = +(vd[i] * f[rs1]) - vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(vd, rs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(vd, rs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vd, rs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h new file mode 100644 index 00000000..3cac937f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h @@ -0,0 +1,11 @@ +// vfmsub: vd[i] = +(vd[i] * vs1[i]) - vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vd, vs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(vd, vs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vd, vs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h new file mode 100644 index 00000000..f5f63e49 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h @@ -0,0 +1,11 @@ +// vfmul.vf vd, vs2, rs1, vm +VI_VFP_VF_LOOP +({ + vd = f16_mul(vs2, rs1); +}, +{ + vd = f32_mul(vs2, rs1); +}, +{ + vd = f64_mul(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h new file mode 100644 index 00000000..7930fd03 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h @@ -0,0 +1,11 @@ +// vfmul.vv vd, vs1, vs2, vm +VI_VFP_VV_LOOP +({ + vd = f16_mul(vs1, vs2); +}, +{ + vd = f32_mul(vs1, vs2); +}, +{ + vd = f64_mul(vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h b/vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h new file mode 100644 index 00000000..81605eaf --- /dev/null 
+++ b/vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h @@ -0,0 +1,38 @@ +// vfmv_f_s: rd = vs2[0] (rs1=0) +require_vector(true); +require_fp; +require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || + (P.VU.vsew == e32 && p->extension_enabled('F')) || + (P.VU.vsew == e64 && p->extension_enabled('D'))); +require(STATE.frm->read() < 0x5); + +reg_t rs2_num = insn.rs2(); +uint64_t vs2_0 = 0; +const reg_t sew = P.VU.vsew; +switch(sew) { + case e16: + vs2_0 = P.VU.elt<uint16_t>(rs2_num, 0); + break; + case e32: + vs2_0 = P.VU.elt<uint32_t>(rs2_num, 0); + break; + case e64: + vs2_0 = P.VU.elt<uint64_t>(rs2_num, 0); + break; + default: + require(0); + break; +} + +// nan-extend (NaN-box) the value out to FLEN bits +if (FLEN > sew) { + vs2_0 = vs2_0 | (UINT64_MAX << sew); +} + +if (FLEN == 64) { + WRITE_FRD(f64(vs2_0)); +} else { + WRITE_FRD(f32(vs2_0)); +} + +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h b/vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h new file mode 100644 index 00000000..edc376e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h @@ -0,0 +1,29 @@ +// vfmv_s_f: vd[0] = rs1 (vs2=0) +require_vector(true); +require_fp; +require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || + (P.VU.vsew == e32 && p->extension_enabled('F')) || + (P.VU.vsew == e64 && p->extension_enabled('D'))); +require(STATE.frm->read() < 0x5); + +reg_t vl = P.VU.vl->read(); + +if (vl > 0 && P.VU.vstart->read() < vl) { + reg_t rd_num = insn.rd(); + + switch(P.VU.vsew) { + case e16: + P.VU.elt<uint16_t>(rd_num, 0, true) = f16(FRS1).v; + break; + case e32: + P.VU.elt<uint32_t>(rd_num, 0, true) = f32(FRS1).v; + break; + case e64: + if (FLEN == 64) + P.VU.elt<uint64_t>(rd_num, 0, true) = f64(FRS1).v; + else + P.VU.elt<uint64_t>(rd_num, 0, true) = f32(FRS1).v; + break; + } +} +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h b/vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h new file mode 100644 index 00000000..50b7513c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h @@ -0,0 +1,4 @@ +// vfmv_vf vd, vs1 +VI_VF_MERGE_LOOP({ + vd = rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h new file mode 100644 index 00000000..f4996f5d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h @@ -0,0 +1,9 @@ +// vfncvt.f.f.v vd, vs2, vm +VI_VFP_NCVT_FP_TO_FP( + {;}, // BODY16 + { vd = f32_to_f16(vs2); }, // BODY32 + { vd = f64_to_f32(vs2); }, // BODY64 + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('D'); } // CHECK64 +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h new file mode 100644 index 00000000..d587be26 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h @@ -0,0 +1,10 @@ +// vfncvt.f.x.v vd, vs2, vm +VI_VFP_NCVT_INT_TO_FP( + {;}, // BODY16 + { vd = i32_to_f16(vs2); }, // BODY32 + { vd = i64_to_f32(vs2); }, // BODY64 + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('F'); }, // CHECK64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h new file mode 100644 index 00000000..5e0e34fa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h @@ -0,0 +1,10 @@ +// vfncvt.f.xu.v vd, vs2, vm +VI_VFP_NCVT_INT_TO_FP( + {;}, // BODY16 + { vd = ui32_to_f16(vs2); }, // BODY32 + { vd = ui64_to_f32(vs2); }, // BODY64 + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('F'); }, // CHECK64 + uint // 
sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h new file mode 100644 index 00000000..89bdc05f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h @@ -0,0 +1,15 @@ +// vfncvt.rod.f.f.v vd, vs2, vm +VI_VFP_NCVT_FP_TO_FP( + {;}, // BODY16 + { // BODY32 + softfloat_roundingMode = softfloat_round_odd; + vd = f32_to_f16(vs2); + }, + { // BODY64 + softfloat_roundingMode = softfloat_round_odd; + vd = f64_to_f32(vs2); + }, + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('F'); } // CHECK64 +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h new file mode 100644 index 00000000..23b4d5e2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.rtz.x.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_i8(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_i16(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_i32(vs2, softfloat_round_minMag, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h new file mode 100644 index 00000000..f55c680b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.rtz.xu.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_ui8(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_ui16(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h new file mode 100644 index 00000000..a7f3c334 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.x.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_i8(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_i16(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_i32(vs2, softfloat_roundingMode, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h new file mode 100644 index 00000000..02046e8b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.xu.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_ui8(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_ui16(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h new file mode 100644 index 00000000..1b99302c --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h @@ -0,0 +1,11 @@ +// vfnmacc: vd[i] = -(f[rs1] * vs2[i]) - vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, f16(vs2.v ^ F16_SIGN), f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(rs1, f32(vs2.v ^ F32_SIGN), f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(rs1, f64(vs2.v ^ F64_SIGN), f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h new file mode 100644 index 00000000..7200e063 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h @@ -0,0 +1,11 @@ +// vfnmacc: vd[i] = -(vs1[i] * vs2[i]) - vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vs2.v ^ F16_SIGN), vs1, f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(f32(vs2.v ^ F32_SIGN), vs1, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vs2.v ^ F64_SIGN), vs1, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h new file mode 100644 index 00000000..cb9c217f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h @@ -0,0 +1,11 @@ +// vfnmadd: vd[i] = -(vd[i] * f[rs1]) - vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), rs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), rs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), rs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h new file mode 100644 index 00000000..7160ed7d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h @@ -0,0 +1,11 @@ +// vfnmadd: vd[i] = -(vd[i] * vs1[i]) - vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), vs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), vs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h new file mode 100644 index 00000000..aa6baa30 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h @@ -0,0 +1,11 @@ +// vfnmsac: vd[i] = -(f[rs1] * vs2[i]) + vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, f16(vs2.v ^ F16_SIGN), vd); +}, +{ + vd = f32_mulAdd(rs1, f32(vs2.v ^ F32_SIGN), vd); +}, +{ + vd = f64_mulAdd(rs1, f64(vs2.v ^ F64_SIGN), vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h new file mode 100644 index 00000000..47db61d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h @@ -0,0 +1,11 @@ +// vfnmsac.vv vd, vs1, vs2, vm # vd[i] = -(vs2[i] * vs1[i]) + vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vs1.v ^ F16_SIGN), vs2, vd); +}, +{ + vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, vd); +}, +{ + vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h new file mode 100644 index 00000000..43aa9e26 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h @@ -0,0 +1,11 @@ +// vfnmsub: vd[i] = -(vd[i] * f[rs1]) + vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), rs1, vs2); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), rs1, vs2); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h new file mode 100644 index 00000000..2a45c8fc --- 
/dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h @@ -0,0 +1,11 @@ +// vfnmsub: vd[i] = -(vd[i] * vs1[i]) + vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), vs1, vs2); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, vs2); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h new file mode 100644 index 00000000..b283343c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h @@ -0,0 +1,11 @@ +// vfrdiv.vf vd, vs2, rs1, vm # scalar-vector, vd[i] = f[rs1]/vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_div(rs1, vs2); +}, +{ + vd = f32_div(rs1, vs2); +}, +{ + vd = f64_div(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h b/vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h new file mode 100644 index 00000000..69c026b0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h @@ -0,0 +1,11 @@ +// vfrec7.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16_recip7(vs2); +}, +{ + vd = f32_recip7(vs2); +}, +{ + vd = f64_recip7(vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h new file mode 100644 index 00000000..f19ec597 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h @@ -0,0 +1,12 @@ +// vfredmax vd, vs2, vs1 +bool is_propagate = false; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_max(vd_0, vs2); +}, +{ + vd_0 = f32_max(vd_0, vs2); +}, +{ + vd_0 = f64_max(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h new file mode 100644 index 00000000..e3cf1513 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h @@ -0,0 +1,12 @@ +// vfredmin vd, vs2, vs1 +bool is_propagate = false; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_min(vd_0, vs2); +}, +{ + vd_0 = f32_min(vd_0, vs2); +}, +{ + vd_0 = f64_min(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h new file mode 100644 index 00000000..2438a7ba --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h @@ -0,0 +1,12 @@ +// vfredosum: vd[0] = sum( vs2[*] , vs1[0] ) +bool is_propagate = false; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_add(vd_0, vs2); +}, +{ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h new file mode 100644 index 00000000..bad7308e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h @@ -0,0 +1,12 @@ +// vfredusum: vd[0] = sum( vs2[*] , vs1[0] ) +bool is_propagate = true; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_add(vd_0, vs2); +}, +{ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h b/vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h new file mode 100644 index 00000000..a0737641 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h @@ -0,0 +1,11 @@ +// vfrsqrt7.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16_rsqrte7(vs2); +}, +{ + vd = f32_rsqrte7(vs2); +}, +{ + vd = f64_rsqrte7(vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h new file mode 100644 index 00000000..7fb26a5b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h @@ -0,0 +1,11 @@ +// vfrsub.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_sub(rs1, vs2); +}, +{ + vd = 
f32_sub(rs1, vs2); +}, +{ + vd = f64_sub(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h new file mode 100644 index 00000000..ce06185e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h @@ -0,0 +1,11 @@ +// vfsgnj vd, vs2, vs1 +VI_VFP_VF_LOOP +({ + vd = fsgnj16(vs2.v, rs1.v, false, false); +}, +{ + vd = fsgnj32(vs2.v, rs1.v, false, false); +}, +{ + vd = fsgnj64(vs2.v, rs1.v, false, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h new file mode 100644 index 00000000..722cb29c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h @@ -0,0 +1,11 @@ +// vfsgnj +VI_VFP_VV_LOOP +({ + vd = fsgnj16(vs2.v, vs1.v, false, false); +}, +{ + vd = fsgnj32(vs2.v, vs1.v, false, false); +}, +{ + vd = fsgnj64(vs2.v, vs1.v, false, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h new file mode 100644 index 00000000..e4894124 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h @@ -0,0 +1,11 @@ +// vfsgnjn +VI_VFP_VF_LOOP +({ + vd = fsgnj16(vs2.v, rs1.v, true, false); +}, +{ + vd = fsgnj32(vs2.v, rs1.v, true, false); +}, +{ + vd = fsgnj64(vs2.v, rs1.v, true, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h new file mode 100644 index 00000000..1d91f691 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h @@ -0,0 +1,11 @@ +// vfsgnjn +VI_VFP_VV_LOOP +({ + vd = fsgnj16(vs2.v, vs1.v, true, false); +}, +{ + vd = fsgnj32(vs2.v, vs1.v, true, false); +}, +{ + vd = fsgnj64(vs2.v, vs1.v, true, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h new file mode 100644 index 00000000..7be164c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h @@ -0,0 +1,11 @@ +// vfsgnjx +VI_VFP_VF_LOOP +({ + vd = fsgnj16(vs2.v, rs1.v, false, true); +}, +{ + vd = fsgnj32(vs2.v, rs1.v, false, true); +}, +{ + vd = fsgnj64(vs2.v, rs1.v, false, true); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h new file mode 100644 index 00000000..b04b8454 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h @@ -0,0 +1,11 @@ +// vfsgnjx +VI_VFP_VV_LOOP +({ + vd = fsgnj16(vs2.v, vs1.v, false, true); +}, +{ + vd = fsgnj32(vs2.v, vs1.v, false, true); +}, +{ + vd = fsgnj64(vs2.v, vs1.v, false, true); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h new file mode 100644 index 00000000..66eeaccb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h @@ -0,0 +1,36 @@ +//vfslide1down.vf vd, vs2, rs1 +VI_CHECK_SLIDE(false); + +VI_VFP_LOOP_BASE +if (i != vl - 1) { + switch (P.VU.vsew) { + case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, 1); + vd = vs2; + } + break; + case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, 1); + vd = vs2; + } + break; + case e64: { + VI_XI_SLIDEDOWN_PARAMS(e64, 1); + vd = vs2; + } + break; + } +} else { + switch (P.VU.vsew) { + case e16: + P.VU.elt<float16_t>(rd_num, vl - 1, true) = f16(FRS1); + break; + case e32: + P.VU.elt<float32_t>(rd_num, vl - 1, true) = f32(FRS1); + break; + case e64: + P.VU.elt<float64_t>(rd_num, vl - 1, true) = f64(FRS1); + break; + } +} +VI_VFP_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h new file mode 100644 index 
00000000..b9c2817c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h @@ -0,0 +1,36 @@ +//vfslide1up.vf vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +VI_VFP_LOOP_BASE +if (i != 0) { + switch (P.VU.vsew) { + case e16: { + VI_XI_SLIDEUP_PARAMS(e16, 1); + vd = vs2; + } + break; + case e32: { + VI_XI_SLIDEUP_PARAMS(e32, 1); + vd = vs2; + } + break; + case e64: { + VI_XI_SLIDEUP_PARAMS(e64, 1); + vd = vs2; + } + break; + } +} else { + switch (P.VU.vsew) { + case e16: + P.VU.elt<float16_t>(rd_num, 0, true) = f16(FRS1); + break; + case e32: + P.VU.elt<float32_t>(rd_num, 0, true) = f32(FRS1); + break; + case e64: + P.VU.elt<float64_t>(rd_num, 0, true) = f64(FRS1); + break; + } +} +VI_VFP_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h b/vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h new file mode 100644 index 00000000..86f0148d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h @@ -0,0 +1,11 @@ +// vfsqrt.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16_sqrt(vs2); +}, +{ + vd = f32_sqrt(vs2); +}, +{ + vd = f64_sqrt(vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h new file mode 100644 index 00000000..fc6877ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h @@ -0,0 +1,11 @@ +// vfsub.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_sub(vs2, rs1); +}, +{ + vd = f32_sub(vs2, rs1); +}, +{ + vd = f64_sub(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h new file mode 100644 index 00000000..b0403f11 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h @@ -0,0 +1,11 @@ +// vfsub.vv vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_sub(vs2, vs1); +}, +{ + vd = f32_sub(vs2, vs1); +}, +{ + vd = f64_sub(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h new file mode 100644 index 00000000..b8249001 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h @@ -0,0 +1,8 @@ +// vfwadd.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_add(vs2, rs1); +}, +{ + vd = f64_add(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h new file mode 100644 index 00000000..7255a50e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h @@ -0,0 +1,8 @@ +// vfwadd.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_add(vs2, vs1); +}, +{ + vd = f64_add(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h new file mode 100644 index 00000000..021b17f0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h @@ -0,0 +1,8 @@ +// vfwadd.wf vd, vs2, rs1 +VI_VFP_WF_LOOP_WIDE +({ + vd = f32_add(vs2, rs1); +}, +{ + vd = f64_add(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h new file mode 100644 index 00000000..c1ed0389 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h @@ -0,0 +1,8 @@ +// vfwadd.wv vd, vs2, vs1 +VI_VFP_WV_LOOP_WIDE +({ + vd = f32_add(vs2, vs1); +}, +{ + vd = f64_add(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h new file mode 100644 index 00000000..0700070a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h @@ -0,0 +1,9 @@ +// vfwcvt.f.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_FP( + {;}, // BODY8 + { vd = f16_to_f32(vs2); }, // BODY16 + { vd = f32_to_f64(vs2); }, // BODY32 + 
{;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('D'); } // CHECK32 +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h new file mode 100644 index 00000000..f51e8e3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h @@ -0,0 +1,10 @@ +// vfwcvt.f.x.v vd, vs2, vm +VI_VFP_WCVT_INT_TO_FP( + { vd = i32_to_f16(vs2); }, // BODY8 + { vd = i32_to_f32(vs2); }, // BODY16 + { vd = i32_to_f64(vs2); }, // BODY32 + { require(p->extension_enabled(EXT_ZFH)); }, // CHECK8 + { require_extension('F'); }, // CHECK16 + { require_extension('D'); }, // CHECK32 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h new file mode 100644 index 00000000..7dd49721 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h @@ -0,0 +1,10 @@ +// vfwcvt.f.xu.v vd, vs2, vm +VI_VFP_WCVT_INT_TO_FP( + { vd = ui32_to_f16(vs2); }, // BODY8 + { vd = ui32_to_f32(vs2); }, // BODY16 + { vd = ui32_to_f64(vs2); }, // BODY32 + { require(p->extension_enabled(EXT_ZFH)); }, // CHECK8 + { require_extension('F'); }, // CHECK16 + { require_extension('D'); }, // CHECK32 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h new file mode 100644 index 00000000..74e5b9a0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.rtz.x.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_i32(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_i64(vs2, softfloat_round_minMag, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // CHECK32 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h new file mode 100644 index 00000000..72b8c6ee --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.rtz.xu.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_ui64(vs2, softfloat_round_minMag, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // CHECK32 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h new file mode 100644 index 00000000..74497f4a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.x.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_i32(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_i64(vs2, softfloat_roundingMode, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // CHECK32 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h new file mode 100644 index 00000000..ad96c9c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.xu.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_ui64(vs2, softfloat_roundingMode, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // 
CHECK32 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h new file mode 100644 index 00000000..441fa0a7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h @@ -0,0 +1,8 @@ +// vfwmacc.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(rs1, vs2, vd); +}, +{ + vd = f64_mulAdd(rs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h new file mode 100644 index 00000000..a654198b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h @@ -0,0 +1,8 @@ +// vfwmacc.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(vs1, vs2, vd); +}, +{ + vd = f64_mulAdd(vs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h new file mode 100644 index 00000000..18010ff4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h @@ -0,0 +1,8 @@ +// vfwmsac.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(rs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(rs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h new file mode 100644 index 00000000..9dc4073f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h @@ -0,0 +1,8 @@ +// vfwmsac.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(vs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h new file mode 100644 index 00000000..2bb543f6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h @@ -0,0 +1,8 @@ +// vfwmul.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mul(vs2, rs1); +}, +{ + vd = f64_mul(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h new file mode 100644 index 00000000..2ce38e62 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h @@ -0,0 +1,8 @@ +// vfwmul.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mul(vs2, vs1); +}, +{ + vd = f64_mul(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h new file mode 100644 index 00000000..038bda08 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h @@ -0,0 +1,8 @@ +// vfwnmacc.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(f32(rs1.v ^ F32_SIGN), vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(rs1.v ^ F64_SIGN), vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h new file mode 100644 index 00000000..bf863e04 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h @@ -0,0 +1,8 @@ +// vfwnmacc.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h new file mode 100644 index 00000000..1e288e1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h @@ -0,0 +1,8 @@ +// vfwnmsac.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(f32(rs1.v ^ F32_SIGN), vs2, vd); +}, +{ + vd = f64_mulAdd(f64(rs1.v ^ F64_SIGN), vs2, vd); +})
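A note on the pattern running through the vfw(n)macc/vfw(n)msac bodies above and their single-width counterparts earlier: negation is folded into one fused multiply-add by XOR-ing the IEEE sign bit of an operand or of the addend (the v.v ^ F*_SIGN idiom), so the result is rounded exactly once. The same identity in plain C++ (the softfloat types are the simulator's; this sketch only shows the bit trick for doubles):

#include <cmath>
#include <cstdint>
#include <cstring>

// Flip the IEEE-754 sign bit, matching the (vd.v ^ F64_SIGN) idiom above.
double flip_sign(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= UINT64_C(1) << 63;
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}

// vfnmsac semantics: vd = -(a * b) + c, computed as a single fma so that
// only one rounding step occurs.
double vfnmsac_element(double a, double b, double c) {
  return std::fma(flip_sign(a), b, c);
}

diff --git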
a/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h new file mode 100644 index 00000000..ce97749e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h @@ -0,0 +1,8 @@ +// vfwnmsac.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, vd); +}, +{ + vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h new file mode 100644 index 00000000..1f42d8ff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h @@ -0,0 +1,9 @@ +// vfwredosum.vs vd, vs2, vs1 +bool is_propagate = false; +VI_VFP_VV_LOOP_WIDE_REDUCTION +({ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h new file mode 100644 index 00000000..4ef28969 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h @@ -0,0 +1,9 @@ +// vfwredusum.vs vd, vs2, vs1 +bool is_propagate = true; +VI_VFP_VV_LOOP_WIDE_REDUCTION +({ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h new file mode 100644 index 00000000..8c376884 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h @@ -0,0 +1,8 @@ +// vfwsub.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_sub(vs2, rs1); +}, +{ + vd = f64_sub(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h new file mode 100644 index 00000000..ce08e36a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h @@ -0,0 +1,8 @@ +// vfwsub.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_sub(vs2, vs1); +}, +{ + vd = f64_sub(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h new file mode 100644 index 00000000..f6f47ca5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h @@ -0,0 +1,8 @@ +// vfwsub.wf vd, vs2, rs1 +VI_VFP_WF_LOOP_WIDE +({ + vd = f32_sub(vs2, rs1); +}, +{ + vd = f64_sub(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h new file mode 100644 index 00000000..eef904dc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h @@ -0,0 +1,8 @@ +// vfwsub.wv vd, vs2, vs1 +VI_VFP_WV_LOOP_WIDE +({ + vd = f32_sub(vs2, vs1); +}, +{ + vd = f64_sub(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vid_v.h b/vendor/riscv-isa-sim/riscv/insns/vid_v.h new file mode 100644 index 00000000..c3162915 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vid_v.h @@ -0,0 +1,31 @@ +// vid.v vd, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs1_num = insn.rs1(); +reg_t rs2_num = insn.rs2(); +require_align(rd_num, P.VU.vflmul); +require_vm; + +for (reg_t i = P.VU.vstart->read(); i < P.VU.vl->read(); ++i) { + VI_LOOP_ELEMENT_SKIP(); + + switch (sew) { + case e8: + P.VU.elt<uint8_t>(rd_num, i, true) = i; + break; + case e16: + P.VU.elt<uint16_t>(rd_num, i, true) = i; + break; + case e32: + P.VU.elt<uint32_t>(rd_num, i, true) = i; + break; + default: + P.VU.elt<uint64_t>(rd_num, i, true) = i; + break; + } +} + +P.VU.vstart->write(0);
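For orientation: vid.v above writes each active element its own index, while viota.m in the next file writes a running count of set mask bits, i.e. element i receives popcount(vs2[0..i-1]). A scalar sketch of the unmasked iota computation (hypothetical helper, independent of the simulator's macros):

#include <cstddef>
#include <cstdint>
#include <vector>

// Unmasked viota.m: vd[i] = number of set bits in vs2_mask[0..i-1].
std::vector<uint64_t> viota(const std::vector<bool>& vs2_mask) {
  std::vector<uint64_t> vd(vs2_mask.size());
  uint64_t cnt = 0;
  for (std::size_t i = 0; i < vs2_mask.size(); ++i) {
    vd[i] = cnt;        // prefix count excludes element i itself
    if (vs2_mask[i])
      ++cnt;
  }
  return vd;
}

diff --git a/vendor/riscv-isa-sim/riscv/insns/viota_m.h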
new file mode 100644
index 00000000..f74f2c24
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/viota_m.h
@@ -0,0 +1,53 @@
+// viota.m vd, vs2, vm
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
+require_vector(true);
+reg_t vl = P.VU.vl->read();
+reg_t sew = P.VU.vsew;
+reg_t rd_num = insn.rd();
+reg_t rs1_num = insn.rs1();
+reg_t rs2_num = insn.rs2();
+require(P.VU.vstart->read() == 0);
+require_vm;
+require_align(rd_num, P.VU.vflmul);
+require_noover(rd_num, P.VU.vflmul, rs2_num, 1);
+
+int cnt = 0;
+for (reg_t i = 0; i < vl; ++i) {
+  const int midx = i / 64;
+  const int mpos = i % 64;
+
+  bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1;
+  bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
+
+  bool has_one = false;
+  if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) {
+    if (vs2_lsb) {
+      has_one = true;
+    }
+  }
+
+  bool use_ori = (insn.v_vm() == 0) && !do_mask;
+  switch (sew) {
+  case e8:
+    P.VU.elt<uint8_t>(rd_num, i, true) = use_ori ?
+      P.VU.elt<uint8_t>(rd_num, i) : cnt;
+    break;
+  case e16:
+    P.VU.elt<uint16_t>(rd_num, i, true) = use_ori ?
+      P.VU.elt<uint16_t>(rd_num, i) : cnt;
+    break;
+  case e32:
+    P.VU.elt<uint32_t>(rd_num, i, true) = use_ori ?
+      P.VU.elt<uint32_t>(rd_num, i) : cnt;
+    break;
+  default:
+    P.VU.elt<uint64_t>(rd_num, i, true) = use_ori ?
+      P.VU.elt<uint64_t>(rd_num, i) : cnt;
+    break;
+  }
+
+  if (has_one) {
+    cnt++;
+  }
+}
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h
new file mode 100644
index 00000000..220e83e6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h
@@ -0,0 +1,2 @@
+// vl1re16.v vd, (rs1)
+VI_LD_WHOLE(uint16);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h
new file mode 100644
index 00000000..e72ca02a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h
@@ -0,0 +1,2 @@
+// vl1re32.v vd, (rs1)
+VI_LD_WHOLE(uint32);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h
new file mode 100644
index 00000000..265701a0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h
@@ -0,0 +1,2 @@
+// vl1re64.v vd, (rs1)
+VI_LD_WHOLE(uint64);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h
new file mode 100644
index 00000000..b4ce6616
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h
@@ -0,0 +1,2 @@
+// vl1re8.v vd, (rs1)
+VI_LD_WHOLE(uint8);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h
new file mode 100644
index 00000000..2846edd9
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h
@@ -0,0 +1,2 @@
+// vl2re16.v vd, (rs1)
+VI_LD_WHOLE(uint16);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h
new file mode 100644
index 00000000..5cea8355
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h
@@ -0,0 +1,2 @@
+// vl2re32.v vd, (rs1)
+VI_LD_WHOLE(uint32);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h
new file mode 100644
index 00000000..efdf2ce2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h
@@ -0,0 +1,2 @@
+// vl2re64.v vd, (rs1)
+VI_LD_WHOLE(uint64);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h
new file mode 100644
index 00000000..fcc3c4c0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h
@@ -0,0 +1,2 @@
+// vl2re8.v vd, (rs1)
+VI_LD_WHOLE(uint8);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h
new file mode 100644
index 00000000..03634183
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h
@@ -0,0 +1,2 @@
+// vl4re16.v vd, (rs1)
+VI_LD_WHOLE(uint16);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h
new file mode 100644
index 00000000..e37cc1ab
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h
@@ -0,0 +1,2 @@
+// vl4re32.v vd, (rs1)
+VI_LD_WHOLE(uint32);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h
new file mode 100644
index 00000000..11486f5d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h
@@ -0,0 +1,2 @@
+// vl4re64.v vd, (rs1)
+VI_LD_WHOLE(uint64);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h
new file mode 100644
index 00000000..f9ce3ff7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h
@@ -0,0 +1,2 @@
+// vl4re8.v vd, (rs1)
+VI_LD_WHOLE(uint8);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h
new file mode 100644
index 00000000..0b3f1413
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h
@@ -0,0 +1,2 @@
+// vl8re16.v vd, (rs1)
+VI_LD_WHOLE(uint16);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h
new file mode 100644
index 00000000..3372b89d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h
@@ -0,0 +1,2 @@
+// vl8re32.v vd, (rs1)
+VI_LD_WHOLE(uint32);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h
new file mode 100644
index 00000000..f9a9ca98
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h
@@ -0,0 +1,2 @@
+// vl8re64.v vd, (rs1)
+VI_LD_WHOLE(uint64);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h
new file mode 100644
index 00000000..ee05e81a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h
@@ -0,0 +1,2 @@
+// vl8re8.v vd, (rs1)
+VI_LD_WHOLE(uint8);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle16_v.h b/vendor/riscv-isa-sim/riscv/insns/vle16_v.h
new file mode 100644
index 00000000..70bf39fb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle16_v.h
@@ -0,0 +1,2 @@
+// vle16.v and vlseg[2-8]e16.v
+VI_LD(0, (i * nf + fn), int16, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h
new file mode 100644
index 00000000..53c88891
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h
@@ -0,0 +1,2 @@
+// vle16ff.v and vlseg[2-8]e16ff.v
+VI_LDST_FF(int16);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle32_v.h b/vendor/riscv-isa-sim/riscv/insns/vle32_v.h
new file mode 100644
index 00000000..f1d0e73c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle32_v.h
@@ -0,0 +1,2 @@
+// vle32.v and vlseg[2-8]e32.v
+VI_LD(0, (i * nf + fn), int32, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h
new file mode 100644
index 00000000..7d03d7dd
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h
@@ -0,0 +1,2 @@
+// vle32ff.v and vlseg[2-8]e32ff.v
+VI_LDST_FF(int32);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle64_v.h b/vendor/riscv-isa-sim/riscv/insns/vle64_v.h
new file mode 100644
index 00000000..86deb5cb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle64_v.h
@@ -0,0 +1,2 @@
+// vle64.v and vlseg[2-8]e64.v
+VI_LD(0, (i * nf + fn), int64, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h
new file mode 100644
index 00000000..39996da6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h
@@ -0,0 +1,2 @@
+// vle64ff.v and vlseg[2-8]e64ff.v
+VI_LDST_FF(int64);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle8_v.h b/vendor/riscv-isa-sim/riscv/insns/vle8_v.h
new file mode 100644
index 00000000..ffe17c3a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle8_v.h
@@ -0,0 +1,2 @@
+// vle8.v and vlseg[2-8]e8.v
+VI_LD(0, (i * nf + fn), int8, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h
new file mode 100644
index 00000000..b56d1d33
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h
@@ -0,0 +1,2 @@
+// vle8ff.v and vlseg[2-8]e8ff.v
+VI_LDST_FF(int8);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vlm_v.h b/vendor/riscv-isa-sim/riscv/insns/vlm_v.h
new file mode 100644
index 00000000..6d3f83aa
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vlm_v.h
@@ -0,0 +1,2 @@
+// vlm.v vd, (rs1)
+VI_LD(0, (i * nf + fn), int8, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h
new file mode 100644
index 00000000..6e4ed49b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h
@@ -0,0 +1,2 @@
+// vloxei16.v and vloxseg[2-8]ei16.v
+VI_LD_INDEX(e16, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h
new file mode 100644
index 00000000..a7da8ff0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h
@@ -0,0 +1,2 @@
+// vloxei32.v and vloxseg[2-8]ei32.v
+VI_LD_INDEX(e32, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h
new file mode 100644
index 00000000..067224e4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h
@@ -0,0 +1,3 @@
+// vloxei64.v and vloxseg[2-8]ei64.v
+VI_LD_INDEX(e64, true);
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h
new file mode 100644
index 00000000..d2730499
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h
@@ -0,0 +1,2 @@
+// vloxei8.v and vloxseg[2-8]ei8.v
+VI_LD_INDEX(e8, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse16_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse16_v.h
new file mode 100644
index 00000000..5ac23a98
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vlse16_v.h
@@ -0,0 +1,2 @@
+// vlse16.v and vlsseg[2-8]e16.v
+VI_LD(i * RS2, fn, int16, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse32_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse32_v.h
new file mode 100644
index 00000000..cfd74fb9
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vlse32_v.h
@@ -0,0 +1,2 @@
+// vlse32.v and vlsseg[2-8]e32.v
+VI_LD(i * RS2, fn, int32, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse64_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse64_v.h
new file mode 100644
index 00000000..2e339638
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vlse64_v.h
@@ -0,0 +1,2 @@
+// vlse64.v and vlsseg[2-8]e64.v
+VI_LD(i * RS2, fn, int64, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse8_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse8_v.h
new file mode 100644
index 00000000..275f0224
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vlse8_v.h
@@ -0,0 +1,2 @@
+// vlse8.v and vlsseg[2-8]e8.v
+VI_LD(i * RS2, fn, int8, false);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h
new file mode 100644
index 00000000..6e4ed49b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h
@@ -0,0 +1,2 @@
+// vluxei16.v and vluxseg[2-8]ei16.v
+VI_LD_INDEX(e16, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h
new file mode 100644
index 00000000..a7da8ff0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h
@@ -0,0 +1,2 @@
+// vluxei32.v and vluxseg[2-8]ei32.v
+VI_LD_INDEX(e32, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h
new file mode 100644
index 00000000..067224e4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h
@@ -0,0 +1,3 @@
+// vluxei64.v and vluxseg[2-8]ei64.v
+VI_LD_INDEX(e64, true);
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h
new file mode 100644
index 00000000..d2730499
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h
@@ -0,0 +1,2 @@
+// vluxei8.v and vluxseg[2-8]ei8.v
+VI_LD_INDEX(e8, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h
new file mode 100644
index 00000000..e6ec93ff
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h
@@ -0,0 +1,5 @@
+// vmacc.vv: vd[i] = +(vs1[i] * vs2[i]) + vd[i]
+VI_VV_LOOP
+({
+  vd = vs1 * vs2 + vd;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h
new file mode 100644
index 00000000..d40b264a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h
@@ -0,0 +1,5 @@
+// vmacc.vx: vd[i] = +(x[rs1] * vs2[i]) + vd[i]
+VI_VX_LOOP
+({
+  vd = rs1 * vs2 + vd;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h
new file mode 100644
index 00000000..37da8adf
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h
@@ -0,0 +1,2 @@
+// vmadc.vi vd, vs2, simm5
+#include "vmadc_vim.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h
new file mode 100644
index 00000000..a1f78fff
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h
@@ -0,0 +1,5 @@
+// vmadc.vim vd, vs2, simm5, v0
+VI_XI_LOOP_CARRY
+({
+  res = (((op_mask & simm5) + (op_mask & vs2) + carry) >> sew) & 0x1u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h
new file mode 100644
index 00000000..e120fe63
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h
@@ -0,0 +1,2 @@
+// vmadc.vv vd, vs2, vs1
+#include "vmadc_vvm.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h
new file mode 100644
index 00000000..96a7f2cb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h
@@ -0,0 +1,5 @@
+// vmadc.vvm vd, vs2, vs1, v0
+VI_VV_LOOP_CARRY
+({
+  res = (((op_mask & vs1) + (op_mask & vs2) + carry) >> sew) & 0x1u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h
new file mode 100644
index 00000000..39c20b1e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h
@@ -0,0 +1,2 @@
+// vmadc.vx vd, vs2, rs1
+#include "vmadc_vxm.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h
new file mode 100644
index 00000000..1561d858
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h
@@ -0,0 +1,5 @@
+// vmadc.vxm vd, vs2, rs1, v0
+VI_XI_LOOP_CARRY
+({
+  res = (((op_mask & rs1) + (op_mask & vs2) + carry) >> sew) & 0x1u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h
new file mode 100644
index 00000000..a1c0d2ed
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h
@@ -0,0 +1,5 @@
+// vmadd: vd[i] = (vd[i] * vs1[i]) + vs2[i]
+VI_VV_LOOP
+({
+  vd = vd * vs1 + vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h
new file mode 100644
index 00000000..1a8a0015
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h
@@ -0,0 +1,5 @@
+// vmadd: vd[i] = (vd[i] * x[rs1]) + vs2[i]
+VI_VX_LOOP
+({
+  vd = vd * rs1 + vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmand_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmand_mm.h
new file mode 100644
index 00000000..04615c60
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmand_mm.h
@@ -0,0 +1,2 @@
+// vmand.mm vd, vs2, vs1
+VI_LOOP_MASK(vs2 & vs1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h
new file mode 100644
index 00000000..e9a87cf4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h
@@ -0,0 +1,2 @@
+// vmandn.mm vd, vs2, vs1
+VI_LOOP_MASK(vs2 & ~vs1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmax_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmax_vv.h
new file mode 100644
index 00000000..b9f15c5f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmax_vv.h
@@ -0,0 +1,10 @@
+// vmax.vv vd, vs2, vs1, vm # Vector-vector
+VI_VV_LOOP
+({
+  if (vs1 >= vs2) {
+    vd = vs1;
+  } else {
+    vd = vs2;
+  }
+
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmax_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmax_vx.h
new file mode 100644
index 00000000..06f3f431
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmax_vx.h
@@ -0,0 +1,10 @@
+// vmax.vx vd, vs2, rs1, vm # vector-scalar
+VI_VX_LOOP
+({
+  if (rs1 >= vs2) {
+    vd = rs1;
+  } else {
+    vd = vs2;
+  }
+
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h
new file mode 100644
index 00000000..4e6868d1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h
@@ -0,0 +1,9 @@
+// vmaxu.vv vd, vs2, vs1, vm # Vector-vector
+VI_VV_ULOOP
+({
+  if (vs1 >= vs2) {
+    vd = vs1;
+  } else {
+    vd = vs2;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h
new file mode 100644
index 00000000..cab89188
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h
@@ -0,0 +1,9 @@
+// vmaxu.vx vd, vs2, rs1, vm # vector-scalar
+VI_VX_ULOOP
+({
+  if (rs1 >= vs2) {
+    vd = rs1;
+  } else {
+    vd = vs2;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h b/vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h
new file mode 100644
index 00000000..0b2fac98
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h
@@ -0,0 +1,5 @@
+// vmerge.vim vd, vs2, simm5
+VI_VI_MERGE_LOOP
+({
+  vd = use_first ? simm5 : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h
new file mode 100644
index 00000000..b60c1526
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h
@@ -0,0 +1,5 @@
+// vmerge.vvm vd, vs2, vs1
+VI_VV_MERGE_LOOP
+({
+  vd = use_first ? vs1 : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h
new file mode 100644
index 00000000..a22da8a1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h
@@ -0,0 +1,5 @@
+// vmerge.vxm vd, vs2, rs1
+VI_VX_MERGE_LOOP
+({
+  vd = use_first ? rs1 : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h
new file mode 100644
index 00000000..a4d7c50c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h
@@ -0,0 +1,11 @@
+// vmfeq.vf vd, vs2, fs1
+VI_VFP_VF_LOOP_CMP
+({
+  res = f16_eq(vs2, rs1);
+},
+{
+  res = f32_eq(vs2, rs1);
+},
+{
+  res = f64_eq(vs2, rs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h
new file mode 100644
index 00000000..b08ce980
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h
@@ -0,0 +1,11 @@
+// vmfeq.vv vd, vs2, vs1
+VI_VFP_VV_LOOP_CMP
+({
+  res = f16_eq(vs2, vs1);
+},
+{
+  res = f32_eq(vs2, vs1);
+},
+{
+  res = f64_eq(vs2, vs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h
new file mode 100644
index 00000000..ab4df5ce
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h
@@ -0,0 +1,11 @@
+// vmfge.vf vd, vs2, rs1
+VI_VFP_VF_LOOP_CMP
+({
+  res = f16_le(rs1, vs2);
+},
+{
+  res = f32_le(rs1, vs2);
+},
+{
+  res = f64_le(rs1, vs2);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h
new file mode 100644
index 00000000..dcc3ea37
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h
@@ -0,0 +1,11 @@
+// vmfgt.vf vd, vs2, rs1
+VI_VFP_VF_LOOP_CMP
+({
+  res = f16_lt(rs1, vs2);
+},
+{
+  res = f32_lt(rs1, vs2);
+},
+{
+  res = f64_lt(rs1, vs2);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h
new file mode 100644
index 00000000..a942705d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h
@@ -0,0 +1,11 @@
+// vmfle.vf vd, vs2, rs1
+VI_VFP_VF_LOOP_CMP
+({
+  res = f16_le(vs2, rs1);
+},
+{
+  res = f32_le(vs2, rs1);
+},
+{
+  res = f64_le(vs2, rs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h
new file mode 100644
index 00000000..dd6f81da
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h
@@ -0,0 +1,11 @@
+// vmfle.vv vd, vs2, vs1
+VI_VFP_VV_LOOP_CMP
+({
+  res = f16_le(vs2, vs1);
+},
+{
+  res = f32_le(vs2, vs1);
+},
+{
+  res = f64_le(vs2, vs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h
new file mode 100644
index 00000000..110dbd1b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h
@@ -0,0 +1,11 @@
+// vmflt.vf vd, vs2, rs1
+VI_VFP_VF_LOOP_CMP
+({
+  res = f16_lt(vs2, rs1);
+},
+{
+  res = f32_lt(vs2, rs1);
+},
+{
+  res = f64_lt(vs2, rs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h
new file mode 100644
index 00000000..35f8d702
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h
@@ -0,0 +1,11 @@
+// vmflt.vv vd, vs2, vs1
+VI_VFP_VV_LOOP_CMP
+({
+  res = f16_lt(vs2, vs1);
+},
+{
+  res = f32_lt(vs2, vs1);
+},
+{
+  res = f64_lt(vs2, vs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h
new file mode 100644
index 00000000..1b61d571
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h
@@ -0,0 +1,11 @@
+// vmfne.vf vd, vs2, rs1
+VI_VFP_VF_LOOP_CMP
+({
+  res = !f16_eq(vs2, rs1);
+},
+{
+  res = !f32_eq(vs2, rs1);
+},
+{
+  res = !f64_eq(vs2, rs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h
new file mode 100644
index 00000000..4447c3cc
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h
@@ -0,0 +1,11 @@
+// vmfne.vv vd, vs2, vs1
+VI_VFP_VV_LOOP_CMP
+({
+  res = !f16_eq(vs2, vs1);
+},
+{
+  res = !f32_eq(vs2, vs1);
+},
+{
+  res = !f64_eq(vs2, vs1);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmin_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmin_vv.h
new file mode 100644
index 00000000..21da0b3c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmin_vv.h
@@ -0,0 +1,11 @@
+// vmin.vv vd, vs2, vs1, vm # Vector-vector
+VI_VV_LOOP
+({
+  if (vs1 <= vs2) {
+    vd = vs1;
+  } else {
+    vd = vs2;
+  }
+
+
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmin_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmin_vx.h
new file mode 100644
index 00000000..3291776d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmin_vx.h
@@ -0,0 +1,11 @@
+// vmin.vx vd, vs2, rs1, vm # vector-scalar
+VI_VX_LOOP
+({
+  if (rs1 <= vs2) {
+    vd = rs1;
+  } else {
+    vd = vs2;
+  }
+
+
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vminu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vminu_vv.h
new file mode 100644
index 00000000..c0ab1958
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vminu_vv.h
@@ -0,0 +1,9 @@
+// vminu.vv vd, vs2, vs1, vm # Vector-vector
+VI_VV_ULOOP
+({
+  if (vs1 <= vs2) {
+    vd = vs1;
+  } else {
+    vd = vs2;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vminu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vminu_vx.h
new file mode 100644
index 00000000..1055895a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vminu_vx.h
@@ -0,0 +1,10 @@
+// vminu.vx vd, vs2, rs1, vm # vector-scalar
+VI_VX_ULOOP
+({
+  if (rs1 <= vs2) {
+    vd = rs1;
+  } else {
+    vd = vs2;
+  }
+
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h
new file mode 100644
index 00000000..5a3ab090
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h
@@ -0,0 +1,2 @@
+// vmnand.mm vd, vs2, vs1
+VI_LOOP_MASK(~(vs2 & vs1));
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h
new file mode 100644
index 00000000..ab933786
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h
@@ -0,0 +1,2 @@
+// vmnor.mm vd, vs2, vs1
+VI_LOOP_MASK(~(vs2 | vs1));
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmor_mm.h
new file mode 100644
index 00000000..32e71b93
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmor_mm.h
@@ -0,0 +1,2 @@
+// vmor.mm vd, vs2, vs1
+VI_LOOP_MASK(vs2 | vs1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h
new file mode 100644
index 00000000..23026f5c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h
@@ -0,0 +1,2 @@
+// vmorn.mm vd, vs2, vs1
+VI_LOOP_MASK(vs2 | ~vs1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h
new file mode 100644
index 00000000..a7bbba18
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h
@@ -0,0 +1,2 @@
+// vmsbc.vv vd, vs2, vs1
+#include "vmsbc_vvm.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h
new file mode 100644
index 00000000..3225c62d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h
@@ -0,0 +1,5 @@
+// vmsbc.vvm vd, vs2, vs1, v0
+VI_VV_LOOP_CARRY
+({
+  res = (((op_mask & vs2) - (op_mask & vs1) - carry) >> sew) & 0x1u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h
new file mode 100644
index 00000000..cc6b9279
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h
@@ -0,0 +1,2 @@
+// vmsbc.vx vd, vs2, rs1
+#include "vmsbc_vxm.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h
new file mode 100644
index 00000000..8cc46bad
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h
@@ -0,0 +1,5 @@
+// vmsbc.vxm vd, vs2, rs1, v0
+VI_XI_LOOP_CARRY
+({
+  res = (((op_mask & vs2) - (op_mask & rs1) - carry) >> sew) & 0x1u;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h b/vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h
new file mode 100644
index 00000000..6147f6de
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h
@@ -0,0 +1,32 @@
+// vmsbf.m vd, vs2, vm
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
+require_vector(true);
+require(P.VU.vstart->read() == 0);
+require_vm;
+require(insn.rd() != insn.rs2());
+
+reg_t vl = P.VU.vl->read();
+reg_t rd_num = insn.rd();
+reg_t rs2_num = insn.rs2();
+
+bool has_one = false;
+for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
+  const int midx = i / 64;
+  const int mpos = i % 64;
+  const uint64_t mmask = UINT64_C(1) << mpos;
+
+  bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1;
+  bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
+
+
+  if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) {
+    auto &vd = P.VU.elt<uint64_t>(rd_num, midx, true);
+    uint64_t res = 0;
+    if (!has_one && !vs2_lsb) {
+      res = 1;
+    } else if (!has_one && vs2_lsb) {
+      has_one = true;
+    }
+    vd = (vd & ~mmask) | ((res << mpos) & mmask);
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h
new file mode 100644
index 00000000..cfc16825
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h
@@ -0,0 +1,5 @@
+// vmseq.vi vd, vs2, simm5
+VI_VI_LOOP_CMP
+({
+  res = simm5 == vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h
new file mode 100644
index 00000000..91fd204a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h
@@ -0,0 +1,6 @@
+// vmseq.vv vd, vs2, vs1
+VI_VV_LOOP_CMP
+({
+  res = vs2 == vs1;
+})
+
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h
new file mode 100644
index 00000000..ab633231
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h
@@ -0,0 +1,5 @@
+// vmseq.vx vd, vs2, rs1
+VI_VX_LOOP_CMP
+({
+  res = rs1 == vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h
new file mode 100644
index 00000000..4f7dea8e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h
@@ -0,0 +1,5 @@
+// vmsgt.vi vd, vs2, simm5
+VI_VI_LOOP_CMP
+({
+  res = vs2 > simm5;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h
new file mode 100644
index 00000000..5f24db69
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h
@@ -0,0 +1,5 @@
+// vmsgt.vx vd, vs2, rs1
+VI_VX_LOOP_CMP
+({
+  res = vs2 > rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h
new file mode 100644
index 00000000..be28fee1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h
@@ -0,0 +1,5 @@
+// vmsgtu.vi vd, vs2, simm5
+VI_VI_ULOOP_CMP
+({
+  res = vs2 > (insn.v_simm5() & (UINT64_MAX >> (64 - P.VU.vsew)));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h
new file mode 100644
index 00000000..7f398008
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h
@@ -0,0 +1,5 @@
+// vmsgtu.vx vd, vs2, rs1
+VI_VX_ULOOP_CMP
+({
+  res = vs2 > rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsif_m.h b/vendor/riscv-isa-sim/riscv/insns/vmsif_m.h
new file mode 100644
index 00000000..447813fe
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsif_m.h
@@ -0,0 +1,32 @@
+// vmsif.m vd, vs2, vm
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
+require_vector(true);
+require(P.VU.vstart->read() == 0);
+require_vm;
+require(insn.rd() != insn.rs2());
+
+reg_t vl = P.VU.vl->read();
+reg_t rd_num = insn.rd();
+reg_t rs2_num = insn.rs2();
+
+bool has_one = false;
+for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
+  const int midx = i / 64;
+  const int mpos = i % 64;
+  const uint64_t mmask = UINT64_C(1) << mpos;
+
+  bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1;
+  bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
+
+  if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) {
+    auto &vd = P.VU.elt<uint64_t>(rd_num, midx, true);
+    uint64_t res = 0;
+    if (!has_one && !vs2_lsb) {
+      res = 1;
+    } else if (!has_one && vs2_lsb) {
+      has_one = true;
+      res = 1;
+    }
+    vd = (vd & ~mmask) | ((res << mpos) & mmask);
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h
new file mode 100644
index 00000000..f0f67d02
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h
@@ -0,0 +1,5 @@
+// vmsle.vi vd, vs2, simm5
+VI_VI_LOOP_CMP
+({
+  res = vs2 <= simm5;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h
new file mode 100644
index 00000000..30aba06d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h
@@ -0,0 +1,5 @@
+// vmsle.vv vd, vs2, vs1
+VI_VV_LOOP_CMP
+({
+  res = vs2 <= vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h
new file mode 100644
index 00000000..c26d5969
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h
@@ -0,0 +1,5 @@
+// vmsle.vx vd, vs2, rs1
+VI_VX_LOOP_CMP
+({
+  res = vs2 <= rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h
new file mode 100644
index 00000000..0e66b781
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h
@@ -0,0 +1,5 @@
+// vmsleu.vi vd, vs2, simm5
+VI_VI_ULOOP_CMP
+({
+  res = vs2 <= (insn.v_simm5() & (UINT64_MAX >> (64 - P.VU.vsew)));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h
new file mode 100644
index 00000000..0e460326
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h
@@ -0,0 +1,5 @@
+// vmsleu.vv vd, vs2, vs1
+VI_VV_ULOOP_CMP
+({
+  res = vs2 <= vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h
new file mode 100644
index 00000000..935b1768
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h
@@ -0,0 +1,5 @@
+// vmsleu.vx vd, vs2, rs1
+VI_VX_ULOOP_CMP
+({
+  res = vs2 <= rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h
new file mode 100644
index 00000000..71e6f87f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h
@@ -0,0 +1,5 @@
+// vmslt.vv vd, vs2, vs1
+VI_VV_LOOP_CMP
+({
+  res = vs2 < vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h
new file mode 100644
index 00000000..b32bb145
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h
@@ -0,0 +1,5 @@
+// vmslt.vx vd, vs2, rs1
+VI_VX_LOOP_CMP
+({
+  res = vs2 < rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h
new file mode 100644
index 00000000..53a570ae
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h
@@ -0,0 +1,5 @@
+// vmsltu.vv vd, vs2, vs1
+VI_VV_ULOOP_CMP
+({
+  res = vs2 < vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h
new file mode 100644
index 00000000..80825448
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h
@@ -0,0 +1,5 @@
+// vmsltu.vx vd, vs2, rs1
+VI_VX_ULOOP_CMP
+({
+  res = vs2 < rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h
new file mode 100644
index 00000000..5e9758ef
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h
@@ -0,0 +1,5 @@
+// vmsne.vi vd, vs2, simm5
+VI_VI_LOOP_CMP
+({
+  res = vs2 != simm5;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h
new file mode 100644
index 00000000..e6a7174a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h
@@ -0,0 +1,5 @@
+// vmsne.vv vd, vs2, vs1
+VI_VV_LOOP_CMP
+({
+  res = vs2 != vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h
new file mode 100644
index 00000000..9e4c1553
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h
@@ -0,0 +1,5 @@
+// vmsne.vx vd, vs2, rs1
+VI_VX_LOOP_CMP
+({
+  res = vs2 != rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsof_m.h b/vendor/riscv-isa-sim/riscv/insns/vmsof_m.h
new file mode 100644
index 00000000..b9edcf3b
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmsof_m.h
@@ -0,0 +1,30 @@
+// vmsof.m vd, vs2, vm
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
+require_vector(true);
+require(P.VU.vstart->read() == 0);
+require_vm;
+require(insn.rd() != insn.rs2());
+
+reg_t vl = P.VU.vl->read();
+reg_t rd_num = insn.rd();
+reg_t rs2_num = insn.rs2();
+
+bool has_one = false;
+for (reg_t i = P.VU.vstart->read() ; i < vl; ++i) {
+  const int midx = i / 64;
+  const int mpos = i % 64;
+  const uint64_t mmask = UINT64_C(1) << mpos;
+
+  bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1;
+  bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
+
+  if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) {
+    uint64_t &vd = P.VU.elt<uint64_t>(rd_num, midx, true);
+    uint64_t res = 0;
+    if (!has_one && vs2_lsb) {
+      has_one = true;
+      res = 1;
+    }
+    vd = (vd & ~mmask) | ((res << mpos) & mmask);
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmul_vv.h
new file mode 100644
index 00000000..a3278171
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmul_vv.h
@@ -0,0 +1,5 @@
+// vmul vd, vs2, vs1
+VI_VV_LOOP
+({
+  vd = vs2 * vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmul_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmul_vx.h
new file mode 100644
index 00000000..8d683902
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmul_vx.h
@@ -0,0 +1,5 @@
+// vmul vd, vs2, rs1
+VI_VX_LOOP
+({
+  vd = vs2 * rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h
new file mode 100644
index 00000000..e861a339
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h
@@ -0,0 +1,5 @@
+// vmulh vd, vs2, vs1
+VI_VV_LOOP
+({
+  vd = ((int128_t)vs2 * vs1) >> sew;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h
new file mode 100644
index 00000000..b6b55036
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h
@@ -0,0 +1,5 @@
+// vmulh vd, vs2, rs1
+VI_VX_LOOP
+({
+  vd = ((int128_t)vs2 * rs1) >> sew;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h
new file mode 100644
index 00000000..e1c0ba60
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h
@@ -0,0 +1,4 @@
+// vmulhsu.vv vd, vs2, vs1
+VI_VV_SU_LOOP({
+  vd = ((int128_t)vs2 * (uint128_t)vs1) >> sew;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h
new file mode 100644
index 00000000..4619ea89
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h
@@ -0,0 +1,4 @@
+// vmulhsu.vx vd, vs2, rs1
+VI_VX_SU_LOOP({
+  vd = ((int128_t)vs2 * (uint128_t)rs1) >> sew;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h
new file mode 100644
index 00000000..8e318edb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h
@@ -0,0 +1,5 @@
+// vmulhu vd, vs2, vs1
+VI_VV_ULOOP
+({
+  vd = ((uint128_t)vs2 * vs1) >> sew;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h
new file mode 100644
index 00000000..672ad32d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h
@@ -0,0 +1,5 @@
+// vmulhu vd, vs2, rs1
+VI_VX_ULOOP
+({
+  vd = ((uint128_t)vs2 * rs1) >> sew;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h
new file mode 100644
index 00000000..bbdeab9a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h
@@ -0,0 +1,2 @@
+// vmv1r.v vd, vs2
+#include "vmvnfr_v.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h
new file mode 100644
index 00000000..1ac8e09e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h
@@ -0,0 +1,2 @@
+// vmv2r.v vd, vs2
+#include "vmvnfr_v.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h
new file mode 100644
index 00000000..2068731a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h
@@ -0,0 +1,2 @@
+// vmv4r.v vd, vs2
+#include "vmvnfr_v.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h
new file mode 100644
index 00000000..2b205fc7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h
@@ -0,0 +1,2 @@
+// vmv8r.v vd, vs2
+#include "vmvnfr_v.h"
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h b/vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h
new file mode 100644
index 00000000..b66855be
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h
@@ -0,0 +1,29 @@
+// vmv_s_x: vd[0] = rs1
+require_vector(true);
+require(insn.v_vm() == 1);
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
+reg_t vl = P.VU.vl->read();
+
+if (vl > 0 && P.VU.vstart->read() < vl) {
+  reg_t rd_num = insn.rd();
+  reg_t sew = P.VU.vsew;
+
+  switch (sew) {
+  case e8:
+    P.VU.elt<uint8_t>(rd_num, 0, true) = RS1;
+    break;
+  case e16:
+    P.VU.elt<uint16_t>(rd_num, 0, true) = RS1;
+    break;
+  case e32:
+    P.VU.elt<uint32_t>(rd_num, 0, true) = RS1;
+    break;
+  default:
+    P.VU.elt<uint64_t>(rd_num, 0, true) = RS1;
+    break;
+  }
+
+  vl = 0;
+}
+
+P.VU.vstart->write(0);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h b/vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h
new file mode 100644
index 00000000..3d5737e8
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h
@@ -0,0 +1,5 @@
+// vmv.v.i vd, simm5
+VI_VI_MERGE_LOOP
+({
+  vd = simm5;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h
new file mode 100644
index 00000000..429f5a33
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h
@@ -0,0 +1,5 @@
+// vmv.v.v vd, vs1
+VI_VV_MERGE_LOOP
+({
+  vd = vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h b/vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h
new file mode 100644
index 00000000..1eac782e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h
@@ -0,0 +1,5 @@
+// vmv.v.x vd, rs1
+VI_VX_MERGE_LOOP
+({
+  vd = rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h b/vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h
new file mode 100644
index 00000000..d33c3e5d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h
@@ -0,0 +1,27 @@
+// vmv_x_s: rd = vs2[0]
+require_vector(true);
+require(insn.v_vm() == 1);
+uint64_t xmask = UINT64_MAX >> (64 - P.get_isa().get_max_xlen());
+reg_t rs1 = RS1;
+reg_t sew = P.VU.vsew;
+reg_t rs2_num = insn.rs2();
+
+switch (sew) {
+case e8:
+  WRITE_RD(P.VU.elt<int8_t>(rs2_num, 0));
+  break;
+case e16:
+  WRITE_RD(P.VU.elt<int16_t>(rs2_num, 0));
+  break;
+case e32:
+  WRITE_RD(P.VU.elt<int32_t>(rs2_num, 0));
+  break;
+case e64:
+  if (P.get_isa().get_max_xlen() <= sew)
+    WRITE_RD(P.VU.elt<uint64_t>(rs2_num, 0) & xmask);
+  else
+    WRITE_RD(P.VU.elt<uint64_t>(rs2_num, 0));
+  break;
+}
+
+P.VU.vstart->write(0);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h b/vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h
new file mode 100644
index 00000000..f6dc2c08
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h
@@ -0,0 +1,28 @@
+// vmv[1248]r.v vd, vs2
+require_vector_novtype(true, true);
+const reg_t baseAddr = RS1;
+const reg_t vd = insn.rd();
+const reg_t vs2 = insn.rs2();
+const reg_t len = insn.rs1() + 1;
+require_align(vd, len);
+require_align(vs2, len);
+const reg_t size = len * P.VU.vlenb;
+const reg_t start = P.VU.vstart->read() * (P.VU.vsew >> 3);
+
+// register needs one-by-one copy to keep commitlog correct
+if (vd != vs2 && start < size) {
+  reg_t i = start / P.VU.vlenb;
+  reg_t off = start % P.VU.vlenb;
+  if (off) {
+    memcpy(&P.VU.elt<uint8_t>(vd + i, off, true),
+           &P.VU.elt<uint8_t>(vs2 + i, off), P.VU.vlenb - off);
+    i++;
+  }
+
+  for (; i < len; ++i) {
+    memcpy(&P.VU.elt<uint8_t>(vd + i, 0, true),
+           &P.VU.elt<uint8_t>(vs2 + i, 0), P.VU.vlenb);
+  }
+}
+
+P.VU.vstart->write(0);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h
new file mode 100644
index 00000000..0736d5b2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h
@@ -0,0 +1,2 @@
+// vmxnor.mm vd, vs2, vs1
+VI_LOOP_MASK(~(vs2 ^ vs1));
diff --git a/vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h
new file mode 100644
index 00000000..7f0c576e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h
@@ -0,0 +1,2 @@
+// vmxor.mm vd, vs2, vs1
+VI_LOOP_MASK(vs2 ^ vs1);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h
new file mode 100644
index 00000000..ea6898cf
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h
@@ -0,0 +1,25 @@
+// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> simm)
+VRM xrm = P.VU.get_vround_mode();
+int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
+VI_VI_LOOP_NARROW
+({
+  int128_t result = vs2;
+  unsigned shift = zimm5 & ((sew * 2) - 1);
+
+  // rounding
+  INT_ROUNDING(result, xrm, shift);
+
+  result = result >> shift;
+
+  // saturation
+  if (result < int_min) {
+    result = int_min;
+    P_SET_OV(1);
+  } else if (result > int_max) {
+    result = int_max;
+    P_SET_OV(1);
+  }
+
+  vd = result;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h
new file mode 100644
index 00000000..63b84c65
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h
@@ -0,0 +1,25 @@
+// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> vs1[i])
+VRM xrm = P.VU.get_vround_mode();
+int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
+VI_VV_LOOP_NARROW
+({
+  int128_t result = vs2;
+  unsigned shift = vs1 & ((sew * 2) - 1);
+
+  // rounding
+  INT_ROUNDING(result, xrm, shift);
+
+  result = result >> shift;
+
+  // saturation
+  if (result < int_min) {
+    result = int_min;
+    P_SET_OV(1);
+  } else if (result > int_max) {
+    result = int_max;
+    P_SET_OV(1);
+  }
+
+  vd = result;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h
new file mode 100644
index 00000000..482eace4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h
@@ -0,0 +1,25 @@
+// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> rs1[i])
+VRM xrm = P.VU.get_vround_mode();
+int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
+VI_VX_LOOP_NARROW
+({
+  int128_t result = vs2;
+  unsigned shift = rs1 & ((sew * 2) - 1);
+
+  // rounding
+  INT_ROUNDING(result, xrm, shift);
+
+  result = result >> shift;
+
+  // saturation
+  if (result < int_min) {
+    result = int_min;
+    P_SET_OV(1);
+  } else if (result > int_max) {
+    result = int_max;
+    P_SET_OV(1);
+  }
+
+  vd = result;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h
new file mode 100644
index 00000000..441a3a7d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h
@@ -0,0 +1,23 @@
+// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> simm)
+VRM xrm = P.VU.get_vround_mode();
+uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
+uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
+VI_VI_LOOP_NARROW
+({
+  uint128_t result = vs2_u;
+  unsigned shift = zimm5 & ((sew * 2) - 1);
+
+  // rounding
+  INT_ROUNDING(result, xrm, shift);
+
+  // unsigned shifting to rs1
+  result = result >> shift;
+
+  // saturation
+  if (result & sign_mask) {
+    result = uint_max;
+    P_SET_OV(1);
+  }
+
+  vd = result;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h
new file mode 100644
index 00000000..80724899
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h
@@ -0,0 +1,22 @@
+// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> vs1[i])
+VRM xrm = P.VU.get_vround_mode();
+uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
+uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
+VI_VV_LOOP_NARROW
+({
+  uint128_t result = vs2_u;
+  unsigned shift = vs1 & ((sew * 2) - 1);
+
+  // rounding
+  INT_ROUNDING(result, xrm, shift);
+
+  result = result >> shift;
+
+  // saturation
+  if (result & sign_mask) {
+    result = uint_max;
+    P_SET_OV(1);
+  }
+
+  vd = result;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h
new file mode 100644
index 00000000..b2d91c33
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h
@@ -0,0 +1,22 @@
+// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> rs1[i])
+VRM xrm = P.VU.get_vround_mode();
+uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
+uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
+VI_VX_LOOP_NARROW
+({
+  uint128_t result = vs2_u;
+  unsigned shift = rs1 & ((sew * 2) - 1);
+
+  // rounding
+  INT_ROUNDING(result, xrm, shift);
+
+  result = result >> shift;
+
+  // saturation
+  if (result & sign_mask) {
+    result = uint_max;
+    P_SET_OV(1);
+  }
+
+  vd = result;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h
new file mode 100644
index 00000000..7c10f29a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h
@@ -0,0 +1,5 @@
+// vnmsac.vv: vd[i] = -(vs1[i] * vs2[i]) + vd[i]
+VI_VV_LOOP
+({
+  vd = -(vs1 * vs2) + vd;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h
new file mode 100644
index 00000000..44920be4
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h
@@ -0,0 +1,5 @@
+// vnmsac.vx: vd[i] = -(x[rs1] * vs2[i]) + vd[i]
+VI_VX_LOOP
+({
+  vd = -(rs1 * vs2) + vd;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h
new file mode 100644
index 00000000..37f82286
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h
@@ -0,0 +1,5 @@
+// vnmsub.vv: vd[i] = -(vd[i] * vs1[i]) + vs2[i]
+VI_VV_LOOP
+({
+  vd = -(vd * vs1) + vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h
new file mode 100644
index 00000000..2e00d22e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h
@@ -0,0 +1,5 @@
+// vnmsub.vx: vd[i] = -(vd[i] * x[rs1]) + vs2[i]
+VI_VX_LOOP
+({
+  vd = -(vd * rs1) + vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h
new file mode 100644
index 00000000..0502ff1a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h
@@ -0,0 +1,5 @@
+// vnsra.wi vd, vs2, zimm5
+VI_VI_LOOP_NSHIFT
+({
+  vd = vs2 >> (zimm5 & (sew * 2 - 1) & 0x1f);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h
new file mode 100644
index 00000000..555ce3fb
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h
@@ -0,0 +1,5 @@
+// vnsra.wv vd, vs2, vs1
+VI_VV_LOOP_NSHIFT
+({
+  vd = vs2 >> (vs1 & (sew * 2 - 1));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h
new file mode 100644
index 00000000..05a55e3e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h
@@ -0,0 +1,5 @@
+// vnsra.wx vd, vs2, rs1
+VI_VX_LOOP_NSHIFT
+({
+  vd = vs2 >> (rs1 & (sew * 2 - 1));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h
new file mode 100644
index 00000000..d4dfcf07
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h
@@ -0,0 +1,5 @@
+// vnsrl.wi vd, vs2, zimm5
+VI_VI_LOOP_NSHIFT
+({
+  vd = vs2_u >> (zimm5 & (sew * 2 - 1));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h
new file mode 100644
index 00000000..ab72b849
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h
@@ -0,0 +1,5 @@
+// vnsrl.wv vd, vs2, vs1
+VI_VV_LOOP_NSHIFT
+({
+  vd = vs2_u >> (vs1 & (sew * 2 - 1));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h
new file mode 100644
index 00000000..e149b38d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h
@@ -0,0 +1,5 @@
+// vnsrl.wx vd, vs2, rs1
+VI_VX_LOOP_NSHIFT
+({
+  vd = vs2_u >> (rs1 & (sew * 2 - 1));
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vor_vi.h b/vendor/riscv-isa-sim/riscv/insns/vor_vi.h
new file mode 100644
index 00000000..f7596074
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vor_vi.h
@@ -0,0 +1,5 @@
+// vor
+VI_VI_LOOP
+({
+  vd = simm5 | vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vor_vv.h b/vendor/riscv-isa-sim/riscv/insns/vor_vv.h
new file mode 100644
index 00000000..0c460662
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vor_vv.h
@@ -0,0 +1,5 @@
+// vor
+VI_VV_LOOP
+({
+  vd = vs1 | vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vor_vx.h b/vendor/riscv-isa-sim/riscv/insns/vor_vx.h
new file mode 100644
index 00000000..01c003ab
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vor_vx.h
@@ -0,0 +1,5 @@
+// vor
+VI_VX_LOOP
+({
+  vd = rs1 | vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredand_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredand_vs.h
new file mode 100644
index 00000000..6c2d9089
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredand_vs.h
@@ -0,0 +1,5 @@
+// vredand.vs vd, vs2, vs1
+VI_VV_LOOP_REDUCTION
+({
+  vd_0_res &= vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h
new file mode 100644
index 00000000..be2e76ab
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h
@@ -0,0 +1,5 @@
+// vredmax.vs vd, vs2, vs1
+VI_VV_LOOP_REDUCTION
+({
+  vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h
new file mode 100644
index 00000000..960f4861
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h
@@ -0,0 +1,5 @@
+// vredmaxu.vs vd, vs2, vs1
+VI_VV_ULOOP_REDUCTION
+({
+  vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h
new file mode 100644
index 00000000..50359b7a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h
@@ -0,0 +1,5 @@
+// vredmin.vs vd, vs2, vs1
+VI_VV_LOOP_REDUCTION
+({
+  vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h
new file mode 100644
index 00000000..70824759
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h
@@ -0,0 +1,5 @@
+// vredminu.vs vd, vs2, vs1
+VI_VV_ULOOP_REDUCTION
+({
+  vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredor_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredor_vs.h
new file mode 100644
index 00000000..f7acd9aa
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredor_vs.h
@@ -0,0 +1,5 @@
+// vredor.vs vd, vs2, vs1
+VI_VV_LOOP_REDUCTION
+({
+  vd_0_res |= vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h
new file mode 100644
index 00000000..c4fefe57
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h
@@ -0,0 +1,5 @@
+// vredsum.vs vd, vs2, vs1
+VI_VV_LOOP_REDUCTION
+({
+  vd_0_res += vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h
new file mode 100644
index 00000000..bb81ad9a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h
@@ -0,0 +1,5 @@
+// vredxor.vs vd, vs2, vs1
+VI_VV_LOOP_REDUCTION
+({
+  vd_0_res ^= vs2;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vrem_vv.h b/vendor/riscv-isa-sim/riscv/insns/vrem_vv.h
new file mode 100644
index 00000000..260716a0
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vrem_vv.h
@@ -0,0 +1,11 @@
+// vrem.vv vd, vs2, vs1
+VI_VV_LOOP
+({
+  if (vs1 == 0)
+    vd = vs2;
+  else if (vs2 == -(((intmax_t)1) << (sew - 1)) && vs1 == -1)
+    vd = 0;
+  else {
+    vd = vs2 % vs1;
+  }
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vrem_vx.h b/vendor/riscv-isa-sim/riscv/insns/vrem_vx.h
new file mode 100644
index 00000000..3702f02f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vrem_vx.h
@@ -0,0 +1,10 @@
+// vrem.vx vd, vs2, rs1
+VI_VX_LOOP
+({
+  if (rs1 == 0)
+    vd = vs2;
+  else if (vs2 == -(((intmax_t)1) << (sew - 1)) && rs1 == -1)
+    vd = 0;
+  else
+    vd = vs2 % rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vremu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vremu_vv.h
new file mode 100644
index 00000000..7e150723
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vremu_vv.h
@@ -0,0 +1,8 @@
+// vremu.vv vd, vs2, vs1
+VI_VV_ULOOP
+({
+  if (vs1 == 0)
+    vd = vs2;
+  else
+    vd = vs2 % vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vremu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vremu_vx.h
new file mode 100644
index 00000000..a87a8200
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vremu_vx.h
@@ -0,0 +1,8 @@
+// vremu.vx vd, vs2, rs1
+VI_VX_ULOOP
+({
+  if (rs1 == 0)
+    vd = vs2;
+  else
+    vd = vs2 % rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h b/vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h
new file mode 100644
index 00000000..56e11e16
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h
@@ -0,0 +1,30 @@
+// vrgather.vi vd, vs2, zimm5 vm # vd[i] = (zimm5 >= VLMAX) ? 0 : vs2[zimm5];
+require_align(insn.rd(), P.VU.vflmul);
+require_align(insn.rs2(), P.VU.vflmul);
+require(insn.rd() != insn.rs2());
+require_vm;
+
+reg_t zimm5 = insn.v_zimm5();
+
+VI_LOOP_BASE
+
+for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
+  VI_LOOP_ELEMENT_SKIP();
+
+  switch (sew) {
+  case e8:
+    P.VU.elt<uint8_t>(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt<uint8_t>(rs2_num, zimm5);
+    break;
+  case e16:
+    P.VU.elt<uint16_t>(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt<uint16_t>(rs2_num, zimm5);
+    break;
+  case e32:
+    P.VU.elt<uint32_t>(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt<uint32_t>(rs2_num, zimm5);
+    break;
+  default:
+    P.VU.elt<uint64_t>(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt<uint64_t>(rs2_num, zimm5);
0 : P.VU.elt(rs2_num, zimm5); + break; + } +} + +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h b/vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h new file mode 100644 index 00000000..a3a32f56 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h @@ -0,0 +1,32 @@ +// vrgather.vv vd, vs2, vs1, vm # vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require_align(insn.rs1(), P.VU.vflmul); +require(insn.rd() != insn.rs2() && insn.rd() != insn.rs1()); +require_vm; + +VI_LOOP_BASE + switch (sew) { + case e8: { + auto vs1 = P.VU.elt(rs1_num, i); + //if (i > 255) continue; + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e16: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e32: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + default: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h b/vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h new file mode 100644 index 00000000..058ffae1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h @@ -0,0 +1,24 @@ +// vrgather.vx vd, vs2, rs1, vm # vd[i] = (rs1 >= VLMAX) ? 0 : vs2[rs1]; +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require(insn.rd() != insn.rs2()); +require_vm; + +reg_t rs1 = RS1; + +VI_LOOP_BASE + switch (sew) { + case e8: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + case e16: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + case e32: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + default: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h b/vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h new file mode 100644 index 00000000..3bb166a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h @@ -0,0 +1,34 @@ +// vrgatherei16.vv vd, vs2, vs1, vm # vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; +float vemul = (16.0 / P.VU.vsew * P.VU.vflmul); +require(vemul >= 0.125 && vemul <= 8); +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require_align(insn.rs1(), vemul); +require_noover(insn.rd(), P.VU.vflmul, insn.rs1(), vemul); +require(insn.rd() != insn.rs2()); +require_vm; + +VI_LOOP_BASE + switch (sew) { + case e8: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e16: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e32: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + default: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 
0 : P.VU.elt(rs2_num, vs1); + break; + } + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h b/vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h new file mode 100644 index 00000000..198c33f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h @@ -0,0 +1,5 @@ +// vrsub.vi vd, vs2, imm, vm # vd[i] = imm - vs2[i] +VI_VI_LOOP +({ + vd = simm5 - vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h new file mode 100644 index 00000000..bfd62594 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h @@ -0,0 +1,5 @@ +// vrsub.vx vd, vs2, rs1, vm # vd[i] = rs1 - vs2[i] +VI_VX_LOOP +({ + vd = rs1 - vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vs1r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs1r_v.h new file mode 100644 index 00000000..1932ec0b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs1r_v.h @@ -0,0 +1,2 @@ +// vs1r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vs2r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs2r_v.h new file mode 100644 index 00000000..2e515b47 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs2r_v.h @@ -0,0 +1,2 @@ +// vs2r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vs4r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs4r_v.h new file mode 100644 index 00000000..161bf89a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs4r_v.h @@ -0,0 +1,2 @@ +// vs4r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vs8r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs8r_v.h new file mode 100644 index 00000000..1ad25756 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs8r_v.h @@ -0,0 +1,2 @@ +// vs8r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h new file mode 100644 index 00000000..7e3b652e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h @@ -0,0 +1,28 @@ +// vsadd.vi vd, vs2 simm5 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; +switch(sew) { +case e8: { + VI_PARAMS(e8); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +case e16: { + VI_PARAMS(e16); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +case e32: { + VI_PARAMS(e32); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +default: { + VI_PARAMS(e64); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h new file mode 100644 index 00000000..60ad5f3c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h @@ -0,0 +1,28 @@ +// vsadd.vv vd, vs2, vs1 +VI_CHECK_SSS(true); +VI_LOOP_BASE +bool sat = false; +switch(sew) { +case e8: { + VV_PARAMS(e8); + vd = sat_add(vs2, vs1, sat); + break; +} +case e16: { + VV_PARAMS(e16); + vd = sat_add(vs2, vs1, sat); + break; +} +case e32: { + VV_PARAMS(e32); + vd = sat_add(vs2, vs1, sat); + break; +} +default: { + VV_PARAMS(e64); + vd = sat_add(vs2, vs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h new file mode 100644 index 00000000..bf68f151 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h @@ -0,0 +1,28 @@ +// vsadd.vx vd, vs2, rs1 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; +switch(sew) { +case e8: { + VX_PARAMS(e8); + vd = sat_add(vs2, rs1, sat); + break; +} +case e16: { + VX_PARAMS(e16); + vd = sat_add(vs2, rs1, sat); + 
break; +} +case e32: { + VX_PARAMS(e32); + vd = sat_add(vs2, rs1, sat); + break; +} +default: { + VX_PARAMS(e64); + vd = sat_add(vs2, rs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h new file mode 100644 index 00000000..38607140 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h @@ -0,0 +1,11 @@ +// vsaddu vd, vs2, zimm5 +VI_VI_ULOOP +({ + bool sat = false; + vd = vs2 + (insn.v_simm5() & (UINT64_MAX >> (64 - P.VU.vsew))); + + sat = vd < vs2; + vd |= -(vd < vs2); + + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h new file mode 100644 index 00000000..a0cba811 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h @@ -0,0 +1,11 @@ +// vsaddu vd, vs2, vs1 +VI_VV_ULOOP +({ + bool sat = false; + vd = vs2 + vs1; + + sat = vd < vs2; + vd |= -(vd < vs2); + + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h new file mode 100644 index 00000000..c0a7d872 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h @@ -0,0 +1,12 @@ +// vsaddu vd, vs2, rs1 +VI_VX_ULOOP +({ + bool sat = false; + vd = vs2 + rs1; + + sat = vd < vs2; + vd |= -(vd < vs2); + + P_SET_OV(sat); + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h new file mode 100644 index 00000000..8ab6d446 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h @@ -0,0 +1,5 @@ +// vsbc.vvm vd, vs2, rs1, v0 +VI_VV_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & vs2) - (op_mask & vs1) - carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h new file mode 100644 index 00000000..fc983218 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h @@ -0,0 +1,5 @@ +// vsbc.vxm vd, vs2, rs1, v0 +VI_XI_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & vs2) - (op_mask & rs1) - carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vse16_v.h b/vendor/riscv-isa-sim/riscv/insns/vse16_v.h new file mode 100644 index 00000000..9f9afecb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse16_v.h @@ -0,0 +1,2 @@ +// vse16.v and vsseg[2-8]e16.v +VI_ST(0, (i * nf + fn), uint16, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vse32_v.h b/vendor/riscv-isa-sim/riscv/insns/vse32_v.h new file mode 100644 index 00000000..1c6a2310 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse32_v.h @@ -0,0 +1,2 @@ +// vse32.v and vsseg[2-8]e32.v +VI_ST(0, (i * nf + fn), uint32, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vse64_v.h b/vendor/riscv-isa-sim/riscv/insns/vse64_v.h new file mode 100644 index 00000000..61d0ba64 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse64_v.h @@ -0,0 +1,2 @@ +// vse64.v and vsseg[2-8]e64.v +VI_ST(0, (i * nf + fn), uint64, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vse8_v.h b/vendor/riscv-isa-sim/riscv/insns/vse8_v.h new file mode 100644 index 00000000..01f59ceb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse8_v.h @@ -0,0 +1,2 @@ +// vse8.v and vsseg[2-8]e8.v +VI_ST(0, (i * nf + fn), uint8, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsetivli.h b/vendor/riscv-isa-sim/riscv/insns/vsetivli.h new file mode 100644 index 00000000..04900a2f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsetivli.h @@ -0,0 +1,2 @@ +require_vector_novtype(false, false); 
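For reference, the vsaddu_vi/vv/vx bodies above all use the same branchless saturation idiom (`vd |= -(vd < vs2)`). A minimal standalone 64-bit model of that trick follows; it is a sketch only (the real loops operate at the current SEW, and `sat_addu_model` is an illustrative name, not a simulator helper):

#include <cstdint>

// Branchless unsigned saturating add, mirroring the vsaddu pattern:
// if the sum wraps (r < a), -(uint64_t)1 is all-ones, forcing the
// result to the maximum unsigned value.
uint64_t sat_addu_model(uint64_t a, uint64_t b, bool& sat)
{
  uint64_t r = a + b;
  sat = r < a;                  // wrapped past the maximum value
  r |= -(uint64_t)(r < a);      // saturate to all-ones on overflow
  return r;
}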
+WRITE_RD(P.VU.set_vl(insn.rd(), -1, insn.rs1(), insn.v_zimm10()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsetvl.h b/vendor/riscv-isa-sim/riscv/insns/vsetvl.h
new file mode 100644
index 00000000..2969edc6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsetvl.h
@@ -0,0 +1,2 @@
+require_vector_novtype(false, false);
+WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, RS2));
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsetvli.h b/vendor/riscv-isa-sim/riscv/insns/vsetvli.h
new file mode 100644
index 00000000..7b1f1d71
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsetvli.h
@@ -0,0 +1,2 @@
+require_vector_novtype(false, false);
+WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, insn.v_zimm11()));
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h b/vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h
new file mode 100644
index 00000000..16ccfac6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h
@@ -0,0 +1 @@
+VI_VV_EXT(2, int);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h b/vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h
new file mode 100644
index 00000000..d4476a31
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h
@@ -0,0 +1 @@
+VI_VV_EXT(4, int);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h b/vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h
new file mode 100644
index 00000000..09fdc2c7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h
@@ -0,0 +1 @@
+VI_VV_EXT(8, int);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h
new file mode 100644
index 00000000..e867722f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h
@@ -0,0 +1,44 @@
+// vslide1down.vx vd, vs2, rs1
+VI_CHECK_SLIDE(false);
+
+VI_LOOP_BASE
+if (i != vl - 1) {
+  switch (sew) {
+  case e8: {
+    VI_XI_SLIDEDOWN_PARAMS(e8, 1);
+    vd = vs2;
+  }
+  break;
+  case e16: {
+    VI_XI_SLIDEDOWN_PARAMS(e16, 1);
+    vd = vs2;
+  }
+  break;
+  case e32: {
+    VI_XI_SLIDEDOWN_PARAMS(e32, 1);
+    vd = vs2;
+  }
+  break;
+  default: {
+    VI_XI_SLIDEDOWN_PARAMS(e64, 1);
+    vd = vs2;
+  }
+  break;
+  }
+} else {
+  switch (sew) {
+  case e8:
+    P.VU.elt<uint8_t>(rd_num, vl - 1, true) = RS1;
+    break;
+  case e16:
+    P.VU.elt<uint16_t>(rd_num, vl - 1, true) = RS1;
+    break;
+  case e32:
+    P.VU.elt<uint32_t>(rd_num, vl - 1, true) = RS1;
+    break;
+  default:
+    P.VU.elt<uint64_t>(rd_num, vl - 1, true) = RS1;
+    break;
+  }
+}
+VI_LOOP_END
diff --git a/vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h
new file mode 100644
index 00000000..33cb9ed6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h
@@ -0,0 +1,30 @@
+// vslide1up.vx vd, vs2, rs1
+VI_CHECK_SLIDE(true);
+
+VI_LOOP_BASE
+if (i != 0) {
+  if (sew == e8) {
+    VI_XI_SLIDEUP_PARAMS(e8, 1);
+    vd = vs2;
+  } else if (sew == e16) {
+    VI_XI_SLIDEUP_PARAMS(e16, 1);
+    vd = vs2;
+  } else if (sew == e32) {
+    VI_XI_SLIDEUP_PARAMS(e32, 1);
+    vd = vs2;
+  } else if (sew == e64) {
+    VI_XI_SLIDEUP_PARAMS(e64, 1);
+    vd = vs2;
+  }
+} else {
+  if (sew == e8) {
+    P.VU.elt<uint8_t>(rd_num, 0, true) = RS1;
+  } else if (sew == e16) {
+    P.VU.elt<uint16_t>(rd_num, 0, true) = RS1;
+  } else if (sew == e32) {
+    P.VU.elt<uint32_t>(rd_num, 0, true) = RS1;
+  } else if (sew == e64) {
+    P.VU.elt<uint64_t>(rd_num, 0, true) = RS1;
+  }
+}
+VI_LOOP_END
diff --git a/vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h
new file mode 100644
index 00000000..bc440cf2
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h
@@ -0,0 +1,36 @@
+// vslidedown.vi vd, vs2, uimm5
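The vslide1down/vslide1up handlers above shift every element by one position and splice the scalar in at the vacated end. A standalone model of the slide-down case (illustrative only, ignoring masking and vstart; `slide1down` is not a Spike function):

#include <cstdint>
#include <vector>

// Model of vslide1down.vx over vl elements: element i takes the value
// of element i+1, and the scalar x fills the last element, mirroring
// the i != vl - 1 / else split in the handler above.
std::vector<uint64_t> slide1down(const std::vector<uint64_t>& vs2, uint64_t x)
{
  std::vector<uint64_t> vd(vs2.size());
  for (size_t i = 0; i + 1 < vs2.size(); i++)
    vd[i] = vs2[i + 1];
  if (!vd.empty())
    vd.back() = x; // scalar lands in the top element
  return vd;
}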
+VI_CHECK_SLIDE(false); + +const reg_t sh = insn.v_zimm5(); +VI_LOOP_BASE + +reg_t offset = 0; +bool is_valid = (i + sh) < P.VU.vlmax; + +if (is_valid) { + offset = sh; +} + +switch (sew) { +case e8: { + VI_XI_SLIDEDOWN_PARAMS(e8, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, offset); + vd = is_valid ? vs2 : 0; +} +break; +default: { + VI_XI_SLIDEDOWN_PARAMS(e64, offset); + vd = is_valid ? vs2 : 0; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h new file mode 100644 index 00000000..074aa508 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h @@ -0,0 +1,36 @@ +//vslidedown.vx vd, vs2, rs1 +VI_CHECK_SLIDE(false); + +const uint128_t sh = RS1; +VI_LOOP_BASE + +reg_t offset = 0; +bool is_valid = (i + sh) < P.VU.vlmax; + +if (is_valid) { + offset = sh; +} + +switch (sew) { +case e8: { + VI_XI_SLIDEDOWN_PARAMS(e8, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, offset); + vd = is_valid ? vs2 : 0; +} +break; +default: { + VI_XI_SLIDEDOWN_PARAMS(e64, offset); + vd = is_valid ? vs2 : 0; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h b/vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h new file mode 100644 index 00000000..3d537944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h @@ -0,0 +1,31 @@ +// vslideup.vi vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +const reg_t offset = insn.v_zimm5(); +VI_LOOP_BASE +if (P.VU.vstart->read() < offset && i < offset) + continue; + +switch (sew) { +case e8: { + VI_XI_SLIDEUP_PARAMS(e8, offset); + vd = vs2; +} +break; +case e16: { + VI_XI_SLIDEUP_PARAMS(e16, offset); + vd = vs2; +} +break; +case e32: { + VI_XI_SLIDEUP_PARAMS(e32, offset); + vd = vs2; +} +break; +default: { + VI_XI_SLIDEUP_PARAMS(e64, offset); + vd = vs2; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h new file mode 100644 index 00000000..43d41fb3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h @@ -0,0 +1,31 @@ +//vslideup.vx vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +const reg_t offset = RS1; +VI_LOOP_BASE +if (P.VU.vstart->read() < offset && i < offset) + continue; + +switch (sew) { +case e8: { + VI_XI_SLIDEUP_PARAMS(e8, offset); + vd = vs2; +} +break; +case e16: { + VI_XI_SLIDEUP_PARAMS(e16, offset); + vd = vs2; +} +break; +case e32: { + VI_XI_SLIDEUP_PARAMS(e32, offset); + vd = vs2; +} +break; +default: { + VI_XI_SLIDEUP_PARAMS(e64, offset); + vd = vs2; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsll_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsll_vi.h new file mode 100644 index 00000000..be465066 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsll_vi.h @@ -0,0 +1,5 @@ +// vsll.vi vd, vs2, zimm5 +VI_VI_LOOP +({ + vd = vs2 << (simm5 & (sew - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsll_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsll_vv.h new file mode 100644 index 00000000..ce820225 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsll_vv.h @@ -0,0 +1,5 @@ +// vsll +VI_VV_LOOP +({ + vd = vs2 << (vs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsll_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsll_vx.h new file 
mode 100644 index 00000000..823510b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsll_vx.h @@ -0,0 +1,5 @@ +// vsll +VI_VX_LOOP +({ + vd = vs2 << (rs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsm_v.h b/vendor/riscv-isa-sim/riscv/insns/vsm_v.h new file mode 100644 index 00000000..e1d468be --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsm_v.h @@ -0,0 +1,2 @@ +// vse1.v +VI_ST(0, (i * nf + fn), uint8, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h new file mode 100644 index 00000000..413981ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h @@ -0,0 +1,32 @@ +// vsmul.vv vd, vs2, vs1 +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1); + +VI_VV_LOOP +({ + int64_t vs1_sign; + int64_t vs2_sign; + int64_t result_sign; + + vs1_sign = vs1 & sign_mask; + vs2_sign = vs2 & sign_mask; + bool overflow = vs1 == vs2 && vs1 == int_min; + + int128_t result = (int128_t)vs1 * (int128_t)vs2; + result_sign = (vs1_sign ^ vs2_sign) & sign_mask; + + // rounding + INT_ROUNDING(result, xrm, sew - 1); + // remove guard bits + result = result >> (sew - 1); + + // saturation + if (overflow) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h new file mode 100644 index 00000000..2e25670d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h @@ -0,0 +1,33 @@ +// vsmul.vx vd, vs2, rs1 +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1); + +VI_VX_LOOP +({ + int64_t rs1_sign; + int64_t vs2_sign; + int64_t result_sign; + + rs1_sign = rs1 & sign_mask; + vs2_sign = vs2 & sign_mask; + bool overflow = rs1 == vs2 && rs1 == int_min; + + int128_t result = (int128_t)rs1 * (int128_t)vs2; + result_sign = (rs1_sign ^ vs2_sign) & sign_mask; + + // rounding + INT_ROUNDING(result, xrm, sew - 1); + + // remove guard bits + result = result >> (sew - 1); + + // max saturation + if (overflow) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h new file mode 100644 index 00000000..42c3c78d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h @@ -0,0 +1,2 @@ +// vsxei16.v and vsxseg[2-8]ei16.v +VI_ST_INDEX(e16, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h new file mode 100644 index 00000000..f0aed6bd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h @@ -0,0 +1,2 @@ +// vsxei32.v and vsxseg[2-8]ei32.v +VI_ST_INDEX(e32, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h new file mode 100644 index 00000000..88ddaf3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h @@ -0,0 +1,2 @@ +// vsxei64.v and vsxseg[2-8]ei64.v +VI_ST_INDEX(e64, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h new file mode 100644 index 00000000..621512c5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h @@ -0,0 +1,2 @@ +// vsxei8.v and vsxseg[2-8]ei8.v +VI_ST_INDEX(e8, true); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vsra_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsra_vi.h new file mode 100644 index 00000000..5c589274 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsra_vi.h @@ -0,0 +1,5 @@ +// vsra.vi vd, vs2, zimm5 +VI_VI_LOOP +({ + vd = vs2 >> (simm5 & (sew - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsra_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsra_vv.h new file mode 100644 index 00000000..8889af9c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsra_vv.h @@ -0,0 +1,5 @@ +// vsra.vv vd, vs2, vs1 +VI_VV_LOOP +({ + vd = vs2 >> (vs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsra_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsra_vx.h new file mode 100644 index 00000000..c1b0c107 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsra_vx.h @@ -0,0 +1,5 @@ +// vsra.vx vd, vs2, rs1 +VI_VX_LOOP +({ + vd = vs2 >> (rs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h new file mode 100644 index 00000000..fe5d2720 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h @@ -0,0 +1,5 @@ +// vsrl.vi vd, vs2, zimm5 +VI_VI_ULOOP +({ + vd = vs2 >> (zimm5 & (sew - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h new file mode 100644 index 00000000..6376af36 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h @@ -0,0 +1,5 @@ +// vsrl.vv vd, vs2, vs1 +VI_VV_ULOOP +({ + vd = vs2 >> (vs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h new file mode 100644 index 00000000..a4f899ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h @@ -0,0 +1,5 @@ +// vsrl.vx vd, vs2, rs1 +VI_VX_ULOOP +({ + vd = vs2 >> (rs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse16_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse16_v.h new file mode 100644 index 00000000..5dcbaf9f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse16_v.h @@ -0,0 +1,2 @@ +// vsse16v and vssseg[2-8]e16.v +VI_ST(i * RS2, fn, uint16, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse32_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse32_v.h new file mode 100644 index 00000000..80276b25 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse32_v.h @@ -0,0 +1,2 @@ +// vsse32.v and vssseg[2-8]e32.v +VI_ST(i * RS2, fn, uint32, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse64_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse64_v.h new file mode 100644 index 00000000..a4b6290b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse64_v.h @@ -0,0 +1,2 @@ +// vsse64.v and vssseg[2-8]e64.v +VI_ST(i * RS2, fn, uint64, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse8_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse8_v.h new file mode 100644 index 00000000..5ba3ccec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse8_v.h @@ -0,0 +1,2 @@ +// vsse8.v and vssseg[2-8]e8.v +VI_ST(i * RS2, fn, uint8, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vssra_vi.h b/vendor/riscv-isa-sim/riscv/insns/vssra_vi.h new file mode 100644 index 00000000..ff2e1c58 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssra_vi.h @@ -0,0 +1,10 @@ +// vssra.vi vd, vs2, simm5 +VRM xrm = P.VU.get_vround_mode(); +VI_VI_LOOP +({ + int sh = simm5 & (sew - 1) & 0x1f; + int128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssra_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssra_vv.h 
new file mode 100644 index 00000000..7bbc766f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssra_vv.h @@ -0,0 +1,10 @@ +// vssra.vv vd, vs2, vs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VV_LOOP +({ + int sh = vs1 & (sew - 1); + int128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssra_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssra_vx.h new file mode 100644 index 00000000..068a22b6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssra_vx.h @@ -0,0 +1,10 @@ +// vssra.vx vd, vs2, rs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VX_LOOP +({ + int sh = rs1 & (sew - 1); + int128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h b/vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h new file mode 100644 index 00000000..d125164d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h @@ -0,0 +1,10 @@ +// vssra.vi vd, vs2, simm5 +VRM xrm = P.VU.get_vround_mode(); +VI_VI_ULOOP +({ + int sh = zimm5 & (sew - 1) & 0x1f; + uint128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h new file mode 100644 index 00000000..a8e5d164 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h @@ -0,0 +1,10 @@ +// vssrl.vv vd, vs2, vs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VV_ULOOP +({ + int sh = vs1 & (sew - 1); + uint128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h new file mode 100644 index 00000000..ee3cb346 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h @@ -0,0 +1,10 @@ +// vssrl.vx vd, vs2, rs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VX_ULOOP +({ + int sh = rs1 & (sew - 1); + uint128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssub_vv.h new file mode 100644 index 00000000..d55df238 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssub_vv.h @@ -0,0 +1,29 @@ +// vssub.vv vd, vs2, vs1 +VI_CHECK_SSS(true); +VI_LOOP_BASE +bool sat = false; + +switch (sew) { +case e8: { + VV_PARAMS(e8); + vd = sat_sub(vs2, vs1, sat); + break; +} +case e16: { + VV_PARAMS(e16); + vd = sat_sub(vs2, vs1, sat); + break; +} +case e32: { + VV_PARAMS(e32); + vd = sat_sub(vs2, vs1, sat); + break; +} +default: { + VV_PARAMS(e64); + vd = sat_sub(vs2, vs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vssub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssub_vx.h new file mode 100644 index 00000000..cbfa2880 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssub_vx.h @@ -0,0 +1,29 @@ +// vssub.vx vd, vs2, rs1 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; + +switch (sew) { +case e8: { + VX_PARAMS(e8); + vd = sat_sub(vs2, rs1, sat); + break; +} +case e16: { + VX_PARAMS(e16); + vd = sat_sub(vs2, rs1, sat); + break; +} +case e32: { + VX_PARAMS(e32); + vd = sat_sub(vs2, rs1, sat); + break; +} +default: { + VX_PARAMS(e64); + vd = sat_sub(vs2, rs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h new file mode 100644 index 00000000..667a2c51 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h @@ -0,0 +1,30 @@ +// vssubu.vv vd, vs2, vs1 
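The vssub bodies above and the vssubu bodies around this point delegate clamping to sat_sub/sat_subu helpers. The unsigned case can be sketched as follows (an illustrative model, not the simulator's helper):

#include <cstdint>

// Unsigned saturating subtract: clamp at zero and report saturation,
// matching what the vssubu handlers expect from sat_subu.
uint64_t sat_subu_model(uint64_t a, uint64_t b, bool& sat)
{
  sat = b > a;            // subtraction would underflow
  return sat ? 0 : a - b; // clamp to the minimum unsigned value
}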
+VI_CHECK_SSS(true);
+VI_LOOP_BASE
+bool sat = false;
+
+switch (sew) {
+case e8: {
+  VV_U_PARAMS(e8);
+  vd = sat_subu(vs2, vs1, sat);
+  break;
+}
+case e16: {
+  VV_U_PARAMS(e16);
+  vd = sat_subu(vs2, vs1, sat);
+  break;
+}
+case e32: {
+  VV_U_PARAMS(e32);
+  vd = sat_subu(vs2, vs1, sat);
+  break;
+}
+default: {
+  VV_U_PARAMS(e64);
+  vd = sat_subu(vs2, vs1, sat);
+  break;
+}
+}
+P_SET_OV(sat);
+
+VI_LOOP_END
diff --git a/vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h
new file mode 100644
index 00000000..603f35e7
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h
@@ -0,0 +1,29 @@
+// vssubu.vx vd, vs2, rs1
+VI_CHECK_SSS(false);
+VI_LOOP_BASE
+bool sat = false;
+
+switch (sew) {
+case e8: {
+  VX_U_PARAMS(e8);
+  vd = sat_subu(vs2, rs1, sat);
+  break;
+}
+case e16: {
+  VX_U_PARAMS(e16);
+  vd = sat_subu(vs2, rs1, sat);
+  break;
+}
+case e32: {
+  VX_U_PARAMS(e32);
+  vd = sat_subu(vs2, rs1, sat);
+  break;
+}
+default: {
+  VX_U_PARAMS(e64);
+  vd = sat_subu(vs2, rs1, sat);
+  break;
+}
+}
+P_SET_OV(sat);
+VI_LOOP_END
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsub_vv.h
new file mode 100644
index 00000000..7d119d50
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsub_vv.h
@@ -0,0 +1,5 @@
+// vsub.vv vd, vs2, vs1, vm # vd[i] = vs2[i] - vs1[i]
+VI_VV_LOOP
+({
+  vd = vs2 - vs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsub_vx.h
new file mode 100644
index 00000000..e075b423
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsub_vx.h
@@ -0,0 +1,5 @@
+// vsub.vx vd, vs2, rs1, vm # vd[i] = vs2[i] - x[rs1]
+VI_VX_LOOP
+({
+  vd = vs2 - rs1;
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h
new file mode 100644
index 00000000..f5549187
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h
@@ -0,0 +1,2 @@
+// vsuxei16.v
+VI_ST_INDEX(e16, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h
new file mode 100644
index 00000000..783bbade
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h
@@ -0,0 +1,2 @@
+// vsuxei32.v
+VI_ST_INDEX(e32, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h
new file mode 100644
index 00000000..9e6018b6
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h
@@ -0,0 +1,2 @@
+// vsuxei64.v
+VI_ST_INDEX(e64, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h
new file mode 100644
index 00000000..322dc35e
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h
@@ -0,0 +1,2 @@
+// vsuxei8.v
+VI_ST_INDEX(e8, true);
diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h
new file mode 100644
index 00000000..df4a1353
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h
@@ -0,0 +1,6 @@
+// vwadd.vv vd, vs2, vs1
+VI_CHECK_DSS(true);
+VI_VV_LOOP_WIDEN
+({
+  VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, int);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h
new file mode 100644
index 00000000..c2263893
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h
@@ -0,0 +1,6 @@
+// vwadd.vx vd, vs2, rs1
+VI_CHECK_DSS(false);
+VI_VX_LOOP_WIDEN
+({
+  VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, +, +, int);
+})
diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h
b/vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h new file mode 100644 index 00000000..54d2ba40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h @@ -0,0 +1,6 @@ +// vwadd.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h new file mode 100644 index 00000000..bb4cee51 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h @@ -0,0 +1,6 @@ +// vwaddu.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h new file mode 100644 index 00000000..286ebc85 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h @@ -0,0 +1,6 @@ +// vwaddu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h new file mode 100644 index 00000000..61cddfc8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h @@ -0,0 +1,6 @@ +// vwaddu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, +, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h new file mode 100644 index 00000000..fee81365 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h @@ -0,0 +1,6 @@ +// vwaddu.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h new file mode 100644 index 00000000..0073ac35 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h @@ -0,0 +1,6 @@ +// vwaddu.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h new file mode 100644 index 00000000..7208c6d6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h @@ -0,0 +1,6 @@ +// vwmacc.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, vd_w, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h new file mode 100644 index 00000000..5ae597a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h @@ -0,0 +1,6 @@ +// vwmacc.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, vd_w, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h new file mode 100644 index 00000000..3aa43ef4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h @@ -0,0 +1,6 @@ +// vwmaccsu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, vs1, vd_w, *, +, int, uint, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h new file mode 100644 index 00000000..e00a21dd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h @@ -0,0 +1,6 @@ +// vwmaccsu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, rs1, vd_w, *, +, int, uint, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h 
b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h new file mode 100644 index 00000000..2cbdaa31 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h @@ -0,0 +1,6 @@ +// vwmaccu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, vd_w, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h new file mode 100644 index 00000000..533297f3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h @@ -0,0 +1,6 @@ +// vwmaccu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, vd_w, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h new file mode 100644 index 00000000..5310f0e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h @@ -0,0 +1,6 @@ +// vwmaccus.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, rs1, vd_w, *, +, int, int, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h new file mode 100644 index 00000000..2197edbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h @@ -0,0 +1,6 @@ +// vwmul.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h new file mode 100644 index 00000000..bc1422d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h @@ -0,0 +1,6 @@ +// vwmul.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h new file mode 100644 index 00000000..ec373771 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h @@ -0,0 +1,6 @@ +// vwmulsu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, vs1, 0, *, +, uint, int, uint) +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h new file mode 100644 index 00000000..d58ecce0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h @@ -0,0 +1,6 @@ +// vwmulsu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, rs1, 0, *, +, uint, int, uint) +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h new file mode 100644 index 00000000..8ddbb4b4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h @@ -0,0 +1,6 @@ +// vwmulu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h new file mode 100644 index 00000000..1ce77eef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h @@ -0,0 +1,6 @@ +// vwmul.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h new file mode 100644 index 00000000..c7a87db4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h @@ -0,0 +1,5 @@ +// vwredsum.vs vd, vs2, vs1 +VI_VV_LOOP_WIDE_REDUCTION +({ + vd_0_res += vs2; +}) diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h b/vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h new file mode 100644 index 00000000..889a77d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h @@ -0,0 +1,5 @@ +// vwredsum.vs vd, vs2, vs1 +VI_VV_ULOOP_WIDE_REDUCTION +({ + vd_0_res += vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h new file mode 100644 index 00000000..99f93489 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h @@ -0,0 +1,6 @@ +// vwsub.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, -, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h new file mode 100644 index 00000000..affdf62c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h @@ -0,0 +1,6 @@ +// vwsub.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, -, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h new file mode 100644 index 00000000..10db7308 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h @@ -0,0 +1,6 @@ +// vwsub.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, -, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h new file mode 100644 index 00000000..f72341ba --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h @@ -0,0 +1,6 @@ +// vwsub.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, -, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h new file mode 100644 index 00000000..cf68adb9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h @@ -0,0 +1,6 @@ +// vwsubu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, -, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h new file mode 100644 index 00000000..3e972dd2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h @@ -0,0 +1,6 @@ +// vwsubu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, -, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h new file mode 100644 index 00000000..3687c3d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h @@ -0,0 +1,6 @@ +// vwsubu.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, -, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h new file mode 100644 index 00000000..c7f20edd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h @@ -0,0 +1,6 @@ +// vwsubu.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, -, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vxor_vi.h b/vendor/riscv-isa-sim/riscv/insns/vxor_vi.h new file mode 100644 index 00000000..b2dcf946 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vxor_vi.h @@ -0,0 +1,5 @@ +// vxor +VI_VI_LOOP +({ + vd = simm5 ^ vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vxor_vv.h b/vendor/riscv-isa-sim/riscv/insns/vxor_vv.h new file mode 100644 index 00000000..c37b6ab7 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vxor_vv.h @@ -0,0 +1,5 @@ +// vxor +VI_VV_LOOP +({ + vd = vs1 ^ vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vxor_vx.h b/vendor/riscv-isa-sim/riscv/insns/vxor_vx.h new file mode 100644 index 00000000..8021e0e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vxor_vx.h @@ -0,0 +1,5 @@ +// vxor +VI_VX_LOOP +({ + vd = rs1 ^ vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h b/vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h new file mode 100644 index 00000000..100f2e35 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h @@ -0,0 +1 @@ +VI_VV_EXT(2, uint); diff --git a/vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h b/vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h new file mode 100644 index 00000000..6ff920e0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h @@ -0,0 +1 @@ +VI_VV_EXT(4, uint); diff --git a/vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h b/vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h new file mode 100644 index 00000000..b1762fbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h @@ -0,0 +1 @@ +VI_VV_EXT(8, uint); diff --git a/vendor/riscv-isa-sim/riscv/insns/wfi.h b/vendor/riscv-isa-sim/riscv/insns/wfi.h new file mode 100644 index 00000000..299cb01f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/wfi.h @@ -0,0 +1,11 @@ +if (STATE.v && STATE.prv == PRV_U) { + require_novirt(); +} else if (get_field(STATE.mstatus->read(), MSTATUS_TW)) { + require_privilege(PRV_M); +} else if (STATE.v) { // VS-mode + if (get_field(STATE.hstatus->read(), HSTATUS_VTW)) + require_novirt(); +} else { + require_privilege(PRV_S); +} +wfi(); diff --git a/vendor/riscv-isa-sim/riscv/insns/xnor.h b/vendor/riscv-isa-sim/riscv/insns/xnor.h new file mode 100644 index 00000000..ccf1c9f7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xnor.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +WRITE_RD(RS1 ^ ~RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/xor.h b/vendor/riscv-isa-sim/riscv/insns/xor.h new file mode 100644 index 00000000..771efa7f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xor.h @@ -0,0 +1 @@ +WRITE_RD(RS1 ^ RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/xori.h b/vendor/riscv-isa-sim/riscv/insns/xori.h new file mode 100644 index 00000000..33ce6307 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xori.h @@ -0,0 +1 @@ +WRITE_RD(insn.i_imm() ^ RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm16.h b/vendor/riscv-isa-sim/riscv/insns/xperm16.h new file mode 100644 index 00000000..6b0ad51f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm16.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(xperm(RS1, RS2, 4, xlen))); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm32.h b/vendor/riscv-isa-sim/riscv/insns/xperm32.h new file mode 100644 index 00000000..64d90a40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm32.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(xperm(RS1, RS2, 5, xlen)); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm4.h b/vendor/riscv-isa-sim/riscv/insns/xperm4.h new file mode 100644 index 00000000..38800f3b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm4.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBKX, EXT_XZBP); +WRITE_RD(sext_xlen(xperm(RS1, RS2, 2, xlen))); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm8.h b/vendor/riscv-isa-sim/riscv/insns/xperm8.h new file mode 100644 index 00000000..c272d669 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm8.h 
@@ -0,0 +1,2 @@
+require_either_extension(EXT_ZBKX, EXT_XZBP);
+WRITE_RD(sext_xlen(xperm(RS1, RS2, 3, xlen)));
diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd810.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd810.h
new file mode 100644
index 00000000..88434dee
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd810.h
@@ -0,0 +1 @@
+P_ZUNPKD8(1, 0)
diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd820.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd820.h
new file mode 100644
index 00000000..f2065081
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd820.h
@@ -0,0 +1 @@
+P_ZUNPKD8(2, 0)
diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd830.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd830.h
new file mode 100644
index 00000000..13655149
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd830.h
@@ -0,0 +1 @@
+P_ZUNPKD8(3, 0)
diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd831.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd831.h
new file mode 100644
index 00000000..8febe77f
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd831.h
@@ -0,0 +1 @@
+P_ZUNPKD8(3, 1)
diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd832.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd832.h
new file mode 100644
index 00000000..f14030bc
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd832.h
@@ -0,0 +1 @@
+P_ZUNPKD8(3, 2)
diff --git a/vendor/riscv-isa-sim/riscv/interactive.cc b/vendor/riscv-isa-sim/riscv/interactive.cc
new file mode 100644
index 00000000..88eb86b1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/interactive.cc
@@ -0,0 +1,579 @@
+// See LICENSE for license details.
+
+#include "sim.h"
+#include "decode.h"
+#include "disasm.h"
+#include "mmu.h"
+#include <termios.h>
+#include <unistd.h>
+#include <map>
+#include <iostream>
+#include <iomanip>
+#include <climits>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <cassert>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <algorithm>
+
+#define MAX_CMD_STR 40 // maximum possible size of a command line
+
+#define STR_(X) #X // these definitions allow using a macro as a string
+#define STR(X) STR_(X)
+
+DECLARE_TRAP(-1, interactive)
+
+processor_t *sim_t::get_core(const std::string& i)
+{
+  char *ptr;
+  unsigned long p = strtoul(i.c_str(), &ptr, 10);
+  if (*ptr || p >= procs.size())
+    throw trap_interactive();
+  return get_core(p);
+}
+
+static std::string readline(int fd)
+{
+  struct termios tios;
+  bool noncanonical = tcgetattr(fd, &tios) == 0 && (tios.c_lflag & ICANON) == 0;
+
+  std::string s;
+  for (char ch; read(fd, &ch, 1) == 1; )
+  {
+    if (ch == '\x7f')
+    {
+      if (s.empty())
+        continue;
+      s.erase(s.end()-1);
+
+      if (noncanonical && write(fd, "\b \b", 3) != 3) {}
+    }
+    else if (noncanonical && write(fd, &ch, 1) != 1) {}
+
+    if (ch == '\n')
+      break;
+    if (ch != '\x7f')
+      s += ch;
+  }
+  return s;
+}
+
+#ifdef HAVE_BOOST_ASIO
+// read input command string
+std::string sim_t::rin(boost::asio::streambuf *bout_ptr) {
+  std::string s;
+  if (acceptor_ptr) { // if we are listening, get commands from socket
+    try {
+      socket_ptr.reset(new boost::asio::ip::tcp::socket(*io_service_ptr));
+      acceptor_ptr->accept(*socket_ptr); // wait for someone to open connection
+      boost::asio::streambuf buf;
+      boost::asio::read_until(*socket_ptr, buf, "\n"); // wait for command
+      s = boost::asio::buffer_cast<const char*>(buf.data());
+      boost::erase_all(s, "\r"); // get rid of any CR and LF
+      boost::erase_all(s, "\n");
+      // The socket client is a web server and it appends the IP of the computer
+      // that sent the command from its web browser.
+
+      // For now, erase the IP if it is there.
+      boost::regex re(" ((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}"
+                      "(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])$");
+      s = boost::regex_replace(s, re, (std::string)"");
+
+      // TODO: check the IP against the IP used to upload RISC-V source files
+    } catch (std::exception& e) {
+      std::cerr << e.what() << std::endl;
+    }
+    // output goes to socket
+    sout_.rdbuf(bout_ptr);
+  } else { // if we are not listening on a socket, get commands from terminal
+    std::cerr << ": " << std::flush;
+    s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin
+    // output goes to stderr
+    sout_.rdbuf(std::cerr.rdbuf());
+  }
+  return s;
+}
+
+// write sout_ to socket (via bout)
+void sim_t::wout(boost::asio::streambuf *bout_ptr) {
+  if (!cmd_file && acceptor_ptr) { // only if we are not getting command inputs from a file
+                                   // and if a socket has been created
+    try {
+      boost::system::error_code ignored_error;
+      boost::asio::write(*socket_ptr, *bout_ptr, boost::asio::transfer_all(), ignored_error);
+      socket_ptr->close(); // close the socket after each command input/output
+      // This is needed in order to make the socket interface
+      // accessible by HTTP GET via a socket client in a web server.
+    } catch (std::exception& e) {
+      std::cerr << e.what() << std::endl;
+    }
+  }
+}
+#endif
+
+void sim_t::interactive()
+{
+  typedef void (sim_t::*interactive_func)(const std::string&, const std::vector<std::string>&);
+  std::map<std::string, interactive_func> funcs;
+
+  funcs["run"] = &sim_t::interactive_run_noisy;
+  funcs["r"] = funcs["run"];
+  funcs["rs"] = &sim_t::interactive_run_silent;
+  funcs["vreg"] = &sim_t::interactive_vreg;
+  funcs["reg"] = &sim_t::interactive_reg;
+  funcs["freg"] = &sim_t::interactive_freg;
+  funcs["fregh"] = &sim_t::interactive_fregh;
+  funcs["fregs"] = &sim_t::interactive_fregs;
+  funcs["fregd"] = &sim_t::interactive_fregd;
+  funcs["pc"] = &sim_t::interactive_pc;
+  funcs["mem"] = &sim_t::interactive_mem;
+  funcs["str"] = &sim_t::interactive_str;
+  funcs["until"] = &sim_t::interactive_until_silent;
+  funcs["untiln"] = &sim_t::interactive_until_noisy;
+  funcs["while"] = &sim_t::interactive_until_silent;
+  funcs["quit"] = &sim_t::interactive_quit;
+  funcs["q"] = funcs["quit"];
+  funcs["help"] = &sim_t::interactive_help;
+  funcs["h"] = funcs["help"];
+
+  while (!done())
+  {
+#ifdef HAVE_BOOST_ASIO
+    boost::asio::streambuf bout; // socket output
+#endif
+    std::string s;
+    char cmd_str[MAX_CMD_STR+1]; // only used for following fscanf
+    // first get commands from file, if cmd_file has been set
+    if (cmd_file && !feof(cmd_file) && fscanf(cmd_file, "%" STR(MAX_CMD_STR) "[^\n]\n", cmd_str) == 1) {
+      // up to MAX_CMD_STR characters before \n, skipping \n
+      s = cmd_str;
+      // while we get input from file, output goes to stderr
+      sout_.rdbuf(std::cerr.rdbuf());
+    } else {
+      // when there are no commands left from file or if there was no file from the beginning
+      cmd_file = NULL; // mark file pointer as not valid, so any method can test this easily
+#ifdef HAVE_BOOST_ASIO
+      s = rin(&bout); // get command string from socket or terminal
+#else
+      std::cerr << ": " << std::flush;
+      s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin
+#endif
+    }
+
+    std::stringstream ss(s);
+    std::string cmd, tmp;
+    std::vector<std::string> args;
+
+    if (!(ss >> cmd))
+    {
+      set_procs_debug(true);
+      step(1);
+#ifdef HAVE_BOOST_ASIO
+      wout(&bout); // socket output, if required
+#endif
+      continue;
+    }
+
+    while (ss >> tmp)
+      args.push_back(tmp);
+
+    std::ostream out(sout_.rdbuf());
+
+    try
+    {
+      if (funcs.count(cmd))
+        (this->*funcs[cmd])(cmd, args);
+      else
+        out << "Unknown command " << cmd << std::endl;
+    } catch (trap_t& t) {
+      out << "Bad or missing arguments for command " << cmd << std::endl;
+    }
+#ifdef HAVE_BOOST_ASIO
+    wout(&bout); // socket output, if required
+#endif
+  }
+  ctrlc_pressed = false;
+}

+void sim_t::interactive_help(const std::string& cmd, const std::vector<std::string>& args)
+{
+  std::ostream out(sout_.rdbuf());
+  out <<
+    "Interactive commands:\n"
+    "reg <core> [reg]                # Display [reg] (all if omitted) in <core>\n"
+    "freg <core> <reg>               # Display float <reg> in <core> as hex\n"
+    "fregh <core> <reg>              # Display half precision <reg> in <core>\n"
+    "fregs <core> <reg>              # Display single precision <reg> in <core>\n"
+    "fregd <core> <reg>              # Display double precision <reg> in <core>\n"
+    "vreg <core> [reg]               # Display vector [reg] (all if omitted) in <core>\n"
+    "pc <core>                       # Show current PC in <core>\n"
+    "mem <hex addr>                  # Show contents of physical memory\n"
+    "str <core> <hex addr>           # Show NUL-terminated C string at <hex addr> in core <core>\n"
+    "until reg <core> <reg> <val>    # Stop when <reg> in <core> hits <val>\n"
+    "until pc <core> <val>           # Stop when PC in <core> hits <val>\n"
+    "untiln pc <core> <val>          # Run noisy and stop when PC in <core> hits <val>\n"
+    "until mem <addr> <val>          # Stop when memory <addr> becomes <val>\n"
+    "while reg <core> <reg> <val>    # Run while <reg> in <core> is <val>\n"
+    "while pc <core> <val>           # Run while PC in <core> is <val>\n"
+    "while mem <addr> <val>          # Run while memory <addr> is <val>\n"
+    "run [count]                     # Resume noisy execution (until CTRL+C, or [count] insns)\n"
+    "r [count]                         Alias for run\n"
+    "rs [count]                      # Resume silent execution (until CTRL+C, or [count] insns)\n"
+    "quit                            # End the simulation\n"
+    "q                                 Alias for quit\n"
+    "help                            # This screen!\n"
+    "h                                 Alias for help\n"
+    "Note: Hitting enter is the same as: run 1"
+    << std::endl;
+}
+
+void sim_t::interactive_run_noisy(const std::string& cmd, const std::vector<std::string>& args)
+{
+  interactive_run(cmd, args, true);
+}
+
+void sim_t::interactive_run_silent(const std::string& cmd, const std::vector<std::string>& args)
+{
+  interactive_run(cmd, args, false);
+}
+
+void sim_t::interactive_run(const std::string& cmd, const std::vector<std::string>& args, bool noisy)
+{
+  size_t steps = args.size() ? atoll(args[0].c_str()) : -1;
+  ctrlc_pressed = false;
+  set_procs_debug(noisy);
+  for (size_t i = 0; i < steps && !ctrlc_pressed && !done(); i++)
+    step(1);
+
+  std::ostream out(sout_.rdbuf());
+  if (!noisy) out << ":" << std::endl;
+}
+
+void sim_t::interactive_quit(const std::string& cmd, const std::vector<std::string>& args)
+{
+  exit(0);
+}
+
+reg_t sim_t::get_pc(const std::vector<std::string>& args)
+{
+  if (args.size() != 1)
+    throw trap_interactive();
+
+  processor_t *p = get_core(args[0]);
+  return p->get_state()->pc;
+}
+
+void sim_t::interactive_pc(const std::string& cmd, const std::vector<std::string>& args)
+{
+  if (args.size() != 1)
+    throw trap_interactive();
+
+  processor_t *p = get_core(args[0]);
+  int max_xlen = p->get_isa().get_max_xlen();
+
+  std::ostream out(sout_.rdbuf());
+  out << std::hex << std::setfill('0') << "0x" << std::setw(max_xlen/4)
+      << zext(get_pc(args), max_xlen) << std::endl;
+}
+
+reg_t sim_t::get_reg(const std::vector<std::string>& args)
+{
+  if (args.size() != 2)
+    throw trap_interactive();
+
+  processor_t *p = get_core(args[0]);
+
+  unsigned long r = std::find(xpr_name, xpr_name + NXPR, args[1]) - xpr_name;
+  if (r == NXPR) {
+    char *ptr;
+    r = strtoul(args[1].c_str(), &ptr, 10);
+    if (*ptr) {
+      #define DECLARE_CSR(name, number) if (args[1] == #name) return p->get_csr(number);
+      #include "encoding.h" // generates if's for all csrs
+      r = NXPR; // else case (csr name not found)
+      #undef DECLARE_CSR
+    }
+  }
+
+  if (r >= NXPR)
+    throw trap_interactive();
+
+  return p->get_state()->XPR[r];
+}
+
+freg_t sim_t::get_freg(const std::vector<std::string>& args)
+{
+  if (args.size() != 2)
+    throw trap_interactive();
+
+  processor_t *p = get_core(args[0]);
+  int r = std::find(fpr_name, fpr_name + NFPR, args[1]) - fpr_name;
+  if (r == NFPR)
+    r = atoi(args[1].c_str());
+  if (r >= NFPR)
+    throw trap_interactive();
+
+  return p->get_state()->FPR[r];
+}
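get_reg() above resolves CSR names by re-including encoding.h with a local DECLARE_CSR definition, the classic X-macro pattern. A minimal self-contained illustration of the same technique (with a hypothetical two-entry CSR list standing in for encoding.h):

#include <string>

// A stand-in for encoding.h's DECLARE_CSR invocations.
#define CSR_LIST \
  DECLARE_CSR(mstatus, 0x300) \
  DECLARE_CSR(misa, 0x301)

int lookup_csr(const std::string& name)
{
  // Expand the list into one if-statement per CSR, as get_reg() does.
  #define DECLARE_CSR(csr_name, number) if (name == #csr_name) return number;
  CSR_LIST
  #undef DECLARE_CSR
  return -1; // not a known CSR name
}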
+
+void sim_t::interactive_vreg(const std::string& cmd, const std::vector<std::string>& args)
+{
+  if (args.size() < 1)
+    throw trap_interactive();
+
+  int rstart = 0;
+  int rend = NVPR;
+  if (args.size() >= 2) {
+    rstart = strtol(args[1].c_str(), NULL, 0);
+    if (!(rstart >= 0 && rstart < NVPR)) {
+      rstart = 0;
+    } else {
+      rend = rstart + 1;
+    }
+  }
+
+  // Show all the regs!
+  processor_t *p = get_core(args[0]);
+  const int vlen = (int)(p->VU.get_vlen()) >> 3;
+  const int elen = (int)(p->VU.get_elen()) >> 3;
+  const int num_elem = vlen/elen;
+
+  std::ostream out(sout_.rdbuf());
+  out << std::dec << "VLEN=" << (vlen << 3) << " bits; ELEN=" << (elen << 3) << " bits" << std::endl;
+
+  for (int r = rstart; r < rend; ++r) {
+    out << std::setfill(' ') << std::left << std::setw(4) << vr_name[r] << std::right << ": ";
+    for (int e = num_elem-1; e >= 0; --e) {
+      uint64_t val;
+      switch (elen) {
+      case 8:
+        val = p->VU.elt<uint64_t>(r, e);
+        out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill('0') << std::setw(16) << val << " ";
+        break;
+      case 4:
+        val = p->VU.elt<uint32_t>(r, e);
+        out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill('0') << std::setw(8) << (uint32_t)val << " ";
+        break;
+      case 2:
+        val = p->VU.elt<uint16_t>(r, e);
+        out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill('0') << std::setw(8) << (uint16_t)val << " ";
+        break;
+      case 1:
+        val = p->VU.elt<uint8_t>(r, e);
+        out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill('0') << std::setw(8) << (int)(uint8_t)val << " ";
+        break;
+      }
+    }
+    out << std::endl;
+  }
+}
+
+void sim_t::interactive_reg(const std::string& cmd, const std::vector<std::string>& args)
+{
+  if (args.size() < 1)
+    throw trap_interactive();
+
+  processor_t *p = get_core(args[0]);
+  int max_xlen = p->get_isa().get_max_xlen();
+
+  std::ostream out(sout_.rdbuf());
+  out << std::hex;
+
+  if (args.size() == 1) {
+    // Show all the regs!
+    for (int r = 0; r < NXPR; ++r) {
+      out << std::setfill(' ') << std::setw(4) << xpr_name[r]
+          << ": 0x" << std::setfill('0') << std::setw(max_xlen/4)
+          << zext(p->get_state()->XPR[r], max_xlen);
+      if ((r + 1) % 4 == 0)
+        out << std::endl;
+    }
+  } else {
+    out << "0x" << std::setfill('0') << std::setw(max_xlen/4)
+        << zext(get_reg(args), max_xlen) << std::endl;
+  }
+}
+
+union fpr
+{
+  freg_t r;
+  float s;
+  double d;
+};
+
+void sim_t::interactive_freg(const std::string& cmd, const std::vector<std::string>& args)
+{
+  freg_t r = get_freg(args);
+
+  std::ostream out(sout_.rdbuf());
+  out << std::hex << "0x" << std::setfill('0') << std::setw(16) << r.v[1] << std::setw(16) << r.v[0] << std::endl;
+}
+
+void sim_t::interactive_fregh(const std::string& cmd, const std::vector<std::string>& args)
+{
+  fpr f;
+  f.r = freg(f16_to_f32(f16(get_freg(args))));
+
+  std::ostream out(sout_.rdbuf());
+  out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl;
+}
+
+void sim_t::interactive_fregs(const std::string& cmd, const std::vector<std::string>& args)
+{
+  fpr f;
+  f.r = get_freg(args);
+
+  std::ostream out(sout_.rdbuf());
+  out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl;
+}
+
+void sim_t::interactive_fregd(const std::string& cmd, const std::vector<std::string>& args)
+{
+  fpr f;
+  f.r = get_freg(args);
+
+  std::ostream out(sout_.rdbuf());
+  out << (isBoxedF64(f.r) ?
f.d : NAN) << std::endl; +} + +reg_t sim_t::get_mem(const std::vector& args) +{ + if (args.size() != 1 && args.size() != 2) + throw trap_interactive(); + + std::string addr_str = args[0]; + mmu_t* mmu = debug_mmu; + if (args.size() == 2) + { + processor_t *p = get_core(args[0]); + mmu = p->get_mmu(); + addr_str = args[1]; + } + + reg_t addr = strtol(addr_str.c_str(),NULL,16), val; + if (addr == LONG_MAX) + addr = strtoul(addr_str.c_str(),NULL,16); + + switch(addr % 8) + { + case 0: + val = mmu->load_uint64(addr); + break; + case 4: + val = mmu->load_uint32(addr); + break; + case 2: + case 6: + val = mmu->load_uint16(addr); + break; + default: + val = mmu->load_uint8(addr); + break; + } + return val; +} + +void sim_t::interactive_mem(const std::string& cmd, const std::vector& args) +{ + int max_xlen = procs[0]->get_isa().get_max_xlen(); + + std::ostream out(sout_.rdbuf()); + out << std::hex << "0x" << std::setfill('0') << std::setw(max_xlen/4) + << zext(get_mem(args), max_xlen) << std::endl; +} + +void sim_t::interactive_str(const std::string& cmd, const std::vector& args) +{ + if (args.size() != 1 && args.size() != 2) + throw trap_interactive(); + + std::string addr_str = args[0]; + mmu_t* mmu = debug_mmu; + if (args.size() == 2) + { + processor_t *p = get_core(args[0]); + mmu = p->get_mmu(); + addr_str = args[1]; + } + + reg_t addr = strtol(addr_str.c_str(),NULL,16); + + std::ostream out(sout_.rdbuf()); + + char ch; + while((ch = mmu->load_uint8(addr++))) + out << ch; + + out << std::endl; +} + +void sim_t::interactive_until_silent(const std::string& cmd, const std::vector& args) +{ + interactive_until(cmd, args, false); +} + +void sim_t::interactive_until_noisy(const std::string& cmd, const std::vector& args) +{ + interactive_until(cmd, args, true); +} + +void sim_t::interactive_until(const std::string& cmd, const std::vector& args, bool noisy) +{ + bool cmd_until = cmd == "until" || cmd == "untiln"; + + if (args.size() < 3) + throw trap_interactive(); + + if (args.size() == 3) + get_core(args[1]); // make sure that argument is a valid core number + + char *end; + reg_t val = strtol(args[args.size()-1].c_str(),&end,16); + if (val == LONG_MAX) + val = strtoul(args[args.size()-1].c_str(),&end,16); + if (args[args.size()-1].c_str() == end) // not a valid number + throw trap_interactive(); + + // mask bits above max_xlen + int max_xlen = procs[strtol(args[1].c_str(),NULL,10)]->get_isa().get_max_xlen(); + if (max_xlen == 32) val &= 0xFFFFFFFF; + + std::vector args2; + args2 = std::vector(args.begin()+1,args.end()-1); + + auto func = args[0] == "reg" ? &sim_t::get_reg : + args[0] == "pc" ? &sim_t::get_pc : + args[0] == "mem" ? 
&sim_t::get_mem : + NULL; + + if (func == NULL) + throw trap_interactive(); + + ctrlc_pressed = false; + + while (1) + { + try + { + reg_t current = (this->*func)(args2); + + // mask bits above max_xlen + if (max_xlen == 32) current &= 0xFFFFFFFF; + + if (cmd_until == (current == val)) + break; + if (ctrlc_pressed) + break; + } + catch (trap_t& t) {} + + set_procs_debug(noisy); + step(1); + } +} diff --git a/vendor/riscv-isa-sim/riscv/isa_parser.cc b/vendor/riscv-isa-sim/riscv/isa_parser.cc new file mode 100644 index 00000000..0adec2cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/isa_parser.cc @@ -0,0 +1,247 @@ +#include "isa_parser.h" +#include "extension.h" + +static std::string strtolower(const char* str) +{ + std::string res; + for (const char *r = str; *r; r++) + res += std::tolower(*r); + return res; +} + +static void bad_option_string(const char *option, const char *value, + const char *msg) +{ + fprintf(stderr, "error: bad %s option '%s'. %s\n", option, value, msg); + abort(); +} + +static void bad_isa_string(const char* isa, const char* msg) +{ + bad_option_string("--isa", isa, msg); +} + +static void bad_priv_string(const char* priv) +{ + fprintf(stderr, "error: bad --priv option %s\n", priv); + abort(); +} + +isa_parser_t::isa_parser_t(const char* str, const char *priv) + : extension_table(256, false) +{ + isa_string = strtolower(str); + const char* all_subsets = "mafdqchpv"; + + max_isa = reg_t(2) << 62; + // enable zicntr and zihpm unconditionally for backward compatibility + extension_table[EXT_ZICNTR] = true; + extension_table[EXT_ZIHPM] = true; + + if (isa_string.compare(0, 4, "rv32") == 0) + max_xlen = 32, max_isa = reg_t(1) << 30; + else if (isa_string.compare(0, 4, "rv64") == 0) + max_xlen = 64; + else + bad_isa_string(str, "ISA strings must begin with RV32 or RV64"); + + switch (isa_string[4]) { + case 'g': + // G = IMAFD_Zicsr_Zifencei, but Spike includes the latter two + // unconditionally, so they need not be explicitly added here. 
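+      // e.g. "rv64gc" becomes "rv64imafdc" before the per-letter scan below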
+ isa_string = isa_string.substr(0, 4) + "imafd" + isa_string.substr(5); + // Fall through + case 'i': + max_isa |= 1L << ('i' - 'a'); + break; + + case 'e': + max_isa |= 1L << ('e' - 'a'); + break; + + default: + bad_isa_string(str, ("'" + isa_string.substr(0, 4) + "' must be followed by I, E, or G").c_str()); + } + + const char* isa_str = isa_string.c_str(); + auto p = isa_str, subset = all_subsets; + for (p += 5; islower(*p) && !strchr("zsx", *p); ++p) { + while (*subset && (*p != *subset)) + ++subset; + + if (!*subset) { + if (strchr(all_subsets, *p)) + bad_isa_string(str, ("Extension '" + std::string(1, *p) + "' appears too late in ISA string").c_str()); + else + bad_isa_string(str, ("Unsupported extension '" + std::string(1, *p) + "'").c_str()); + } + + switch (*p) { + case 'p': extension_table[EXT_ZBPBO] = true; + extension_table[EXT_ZPN] = true; + extension_table[EXT_ZPSFOPERAND] = true; + extension_table[EXT_ZMMUL] = true; break; + case 'v': // even rv32iv implies double float + case 'q': max_isa |= 1L << ('d' - 'a'); + // Fall through + case 'd': max_isa |= 1L << ('f' - 'a'); + } + max_isa |= 1L << (*p - 'a'); + extension_table[toupper(*p)] = true; + while (isdigit(*(p + 1))) { + ++p; // skip major version, point, and minor version if presented + if (*(p + 1) == 'p') ++p; + } + p += *(p + 1) == '_'; // underscores may be used to improve readability + } + + while (islower(*p) || (*p == '_')) { + p += *p == '_'; // first underscore is optional + auto end = p; + do ++end; while (*end && *end != '_'); + auto ext_str = std::string(p, end); + if (ext_str == "zfh" || ext_str == "zfhmin") { + if (!((max_isa >> ('f' - 'a')) & 1)) + bad_isa_string(str, ("'" + ext_str + "' extension requires 'F'").c_str()); + extension_table[EXT_ZFHMIN] = true; + if (ext_str == "zfh") + extension_table[EXT_ZFH] = true; + } else if (ext_str == "zicsr") { + // Spike necessarily has Zicsr, because + // Zicsr is implied by the privileged architecture + } else if (ext_str == "zifencei") { + // For compatibility with version 2.0 of the base ISAs, we + // unconditionally include FENCE.I, so Zifencei adds nothing more. + } else if (ext_str == "zihintpause") { + // HINTs encoded in base-ISA instructions are always present. 
+ } else if (ext_str == "zmmul") { + extension_table[EXT_ZMMUL] = true; + } else if (ext_str == "zba") { + extension_table[EXT_ZBA] = true; + } else if (ext_str == "zbb") { + extension_table[EXT_ZBB] = true; + } else if (ext_str == "zbc") { + extension_table[EXT_ZBC] = true; + } else if (ext_str == "zbs") { + extension_table[EXT_ZBS] = true; + } else if (ext_str == "zbkb") { + extension_table[EXT_ZBKB] = true; + } else if (ext_str == "zbkc") { + extension_table[EXT_ZBKC] = true; + } else if (ext_str == "zbkx") { + extension_table[EXT_ZBKX] = true; + } else if (ext_str == "zk") { + extension_table[EXT_ZBKB] = true; + extension_table[EXT_ZBKC] = true; + extension_table[EXT_ZBKX] = true; + extension_table[EXT_ZKND] = true; + extension_table[EXT_ZKNE] = true; + extension_table[EXT_ZKNH] = true; + extension_table[EXT_ZKR] = true; + } else if (ext_str == "zkn") { + extension_table[EXT_ZBKB] = true; + extension_table[EXT_ZBKC] = true; + extension_table[EXT_ZBKX] = true; + extension_table[EXT_ZKND] = true; + extension_table[EXT_ZKNE] = true; + extension_table[EXT_ZKNH] = true; + } else if (ext_str == "zknd") { + extension_table[EXT_ZKND] = true; + } else if (ext_str == "zkne") { + extension_table[EXT_ZKNE] = true; + } else if (ext_str == "zknh") { + extension_table[EXT_ZKNH] = true; + } else if (ext_str == "zks") { + extension_table[EXT_ZBKB] = true; + extension_table[EXT_ZBKC] = true; + extension_table[EXT_ZBKX] = true; + extension_table[EXT_ZKSED] = true; + extension_table[EXT_ZKSH] = true; + } else if (ext_str == "zksed") { + extension_table[EXT_ZKSED] = true; + } else if (ext_str == "zksh") { + extension_table[EXT_ZKSH] = true; + } else if (ext_str == "zkr") { + extension_table[EXT_ZKR] = true; + } else if (ext_str == "zkt") { + } else if (ext_str == "svnapot") { + extension_table[EXT_SVNAPOT] = true; + } else if (ext_str == "svpbmt") { + extension_table[EXT_SVPBMT] = true; + } else if (ext_str == "svinval") { + extension_table[EXT_SVINVAL] = true; + } else if (ext_str == "zicbom") { + extension_table[EXT_ZICBOM] = true; + } else if (ext_str == "zicboz") { + extension_table[EXT_ZICBOZ] = true; + } else if (ext_str == "zicbop") { + } else if (ext_str == "zicntr") { + } else if (ext_str == "zihpm") { + } else if (ext_str[0] == 'x') { + max_isa |= 1L << ('x' - 'a'); + extension_table[toupper('x')] = true; + if (ext_str == "xbitmanip") { + extension_table[EXT_XZBP] = true; + extension_table[EXT_XZBS] = true; + extension_table[EXT_XZBE] = true; + extension_table[EXT_XZBF] = true; + extension_table[EXT_XZBC] = true; + extension_table[EXT_XZBM] = true; + extension_table[EXT_XZBR] = true; + extension_table[EXT_XZBT] = true; + } else if (ext_str == "xzbp") { + extension_table[EXT_XZBP] = true; + } else if (ext_str == "xzbs") { + extension_table[EXT_XZBS] = true; + } else if (ext_str == "xzbe") { + extension_table[EXT_XZBE] = true; + } else if (ext_str == "xzbf") { + extension_table[EXT_XZBF] = true; + } else if (ext_str == "xzbc") { + extension_table[EXT_XZBC] = true; + } else if (ext_str == "xzbm") { + extension_table[EXT_XZBM] = true; + } else if (ext_str == "xzbr") { + extension_table[EXT_XZBR] = true; + } else if (ext_str == "xzbt") { + extension_table[EXT_XZBT] = true; + } else if (ext_str.size() == 1) { + bad_isa_string(str, "single 'X' is not a proper name"); + } else if (ext_str != "xdummy") { + extension_t* x = find_extension(ext_str.substr(1).c_str())(); + if (!extensions.insert(std::make_pair(x->name(), x)).second) { + fprintf(stderr, "extensions must have unique names (got two named 
\"%s\"!)\n", x->name()); + abort(); + } + } + } else { + bad_isa_string(str, ("unsupported extension: " + ext_str).c_str()); + } + p = end; + } + if (*p) { + bad_isa_string(str, ("can't parse: " + std::string(p)).c_str()); + } + + std::string lowercase = strtolower(priv); + bool user = false, supervisor = false; + + if (lowercase == "m") + ; + else if (lowercase == "mu") + user = true; + else if (lowercase == "msu") + user = supervisor = true; + else + bad_priv_string(priv); + + if (user) { + max_isa |= reg_t(user) << ('u' - 'a'); + extension_table['U'] = true; + } + + if (supervisor) { + max_isa |= reg_t(supervisor) << ('s' - 'a'); + extension_table['S'] = true; + } +} diff --git a/vendor/riscv-isa-sim/riscv/isa_parser.h b/vendor/riscv-isa-sim/riscv/isa_parser.h new file mode 100644 index 00000000..3cefe12d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/isa_parser.h @@ -0,0 +1,90 @@ +// See LICENSE for license details. +#ifndef _RISCV_ISA_PARSER_H +#define _RISCV_ISA_PARSER_H + +#include "decode.h" + +#include +#include +#include + +class extension_t; + +typedef enum { + // 65('A') ~ 90('Z') is reserved for standard isa in misa + EXT_ZFH, + EXT_ZFHMIN, + EXT_ZBA, + EXT_ZBB, + EXT_ZBC, + EXT_ZBS, + EXT_ZBKB, + EXT_ZBKC, + EXT_ZBKX, + EXT_ZKND, + EXT_ZKNE, + EXT_ZKNH, + EXT_ZKSED, + EXT_ZKSH, + EXT_ZKR, + EXT_ZMMUL, + EXT_ZBPBO, + EXT_ZPN, + EXT_ZPSFOPERAND, + EXT_SVNAPOT, + EXT_SVPBMT, + EXT_SVINVAL, + EXT_ZDINX, + EXT_ZFINX, + EXT_ZHINX, + EXT_ZHINXMIN, + EXT_ZICBOM, + EXT_ZICBOZ, + EXT_ZICNTR, + EXT_ZIHPM, + EXT_XZBP, + EXT_XZBS, + EXT_XZBE, + EXT_XZBF, + EXT_XZBC, + EXT_XZBM, + EXT_XZBR, + EXT_XZBT, +} isa_extension_t; + +typedef enum { + IMPL_MMU_SV32, + IMPL_MMU_SV39, + IMPL_MMU_SV48, + IMPL_MMU_SV57, + IMPL_MMU_SBARE, + IMPL_MMU, + IMPL_MMU_VMID, + IMPL_MMU_ASID, +} impl_extension_t; + +class isa_parser_t { +public: + isa_parser_t(const char* str, const char *priv); + ~isa_parser_t(){}; + unsigned get_max_xlen() const { return max_xlen; } + reg_t get_max_isa() const { return max_isa; } + std::string get_isa_string() const { return isa_string; } + bool extension_enabled(unsigned char ext) const { + if (ext >= 'A' && ext <= 'Z') + return (max_isa >> (ext - 'A')) & 1; + else + return extension_table[ext]; + } + const std::unordered_map & + get_extensions() const { return extensions; } + +protected: + unsigned max_xlen; + reg_t max_isa; + std::vector extension_table; + std::string isa_string; + std::unordered_map extensions; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/jtag_dtm.cc b/vendor/riscv-isa-sim/riscv/jtag_dtm.cc new file mode 100644 index 00000000..9ca38afb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/jtag_dtm.cc @@ -0,0 +1,204 @@ +#include + +#include "decode.h" +#include "jtag_dtm.h" +#include "debug_module.h" +#include "debug_defines.h" + +#if 0 +# define D(x) x +#else +# define D(x) +#endif + +enum { + IR_IDCODE=1, + IR_DTMCONTROL=0x10, + IR_DBUS=0x11, + IR_BYPASS=0x1f +}; + +#define DTMCONTROL_VERSION 0xf +#define DTMCONTROL_ABITS (0x3f << 4) +#define DTMCONTROL_DMISTAT (3<<10) +#define DTMCONTROL_IDLE (7<<12) +#define DTMCONTROL_DMIRESET (1<<16) +#define DTMCONTROL_DMIHARDRESET (1<<17) + +#define DMI_OP 3 +#define DMI_DATA (0xffffffffLL<<2) +#define DMI_ADDRESS ((1LL<<(abits+34)) - (1LL<<34)) + +#define DMI_OP_STATUS_SUCCESS 0 +#define DMI_OP_STATUS_RESERVED 1 +#define DMI_OP_STATUS_FAILED 2 +#define DMI_OP_STATUS_BUSY 3 + +#define DMI_OP_NOP 0 +#define DMI_OP_READ 1 +#define DMI_OP_WRITE 2 +#define DMI_OP_RESERVED 3 + +jtag_dtm_t::jtag_dtm_t(debug_module_t *dm, 
unsigned required_rti_cycles) : + dm(dm), required_rti_cycles(required_rti_cycles), + _tck(false), _tms(false), _tdi(false), _tdo(false), + dtmcontrol((abits << DTM_DTMCS_ABITS_OFFSET) | 1), + dmi(DMI_OP_STATUS_SUCCESS << DTM_DMI_OP_OFFSET), + bypass(0), + _state(TEST_LOGIC_RESET) +{ +} + +void jtag_dtm_t::reset() { + _state = TEST_LOGIC_RESET; + busy_stuck = false; + rti_remaining = 0; + dmi = 0; +} + +void jtag_dtm_t::set_pins(bool tck, bool tms, bool tdi) { + const jtag_state_t next[16][2] = { + /* TEST_LOGIC_RESET */ { RUN_TEST_IDLE, TEST_LOGIC_RESET }, + /* RUN_TEST_IDLE */ { RUN_TEST_IDLE, SELECT_DR_SCAN }, + /* SELECT_DR_SCAN */ { CAPTURE_DR, SELECT_IR_SCAN }, + /* CAPTURE_DR */ { SHIFT_DR, EXIT1_DR }, + /* SHIFT_DR */ { SHIFT_DR, EXIT1_DR }, + /* EXIT1_DR */ { PAUSE_DR, UPDATE_DR }, + /* PAUSE_DR */ { PAUSE_DR, EXIT2_DR }, + /* EXIT2_DR */ { SHIFT_DR, UPDATE_DR }, + /* UPDATE_DR */ { RUN_TEST_IDLE, SELECT_DR_SCAN }, + /* SELECT_IR_SCAN */ { CAPTURE_IR, TEST_LOGIC_RESET }, + /* CAPTURE_IR */ { SHIFT_IR, EXIT1_IR }, + /* SHIFT_IR */ { SHIFT_IR, EXIT1_IR }, + /* EXIT1_IR */ { PAUSE_IR, UPDATE_IR }, + /* PAUSE_IR */ { PAUSE_IR, EXIT2_IR }, + /* EXIT2_IR */ { SHIFT_IR, UPDATE_IR }, + /* UPDATE_IR */ { RUN_TEST_IDLE, SELECT_DR_SCAN } + }; + + if (!_tck && tck) { + // Positive clock edge. TMS and TDI are sampled on the rising edge of TCK by + // Target. + switch (_state) { + case SHIFT_DR: + dr >>= 1; + dr |= (uint64_t) _tdi << (dr_length-1); + break; + case SHIFT_IR: + ir >>= 1; + ir |= _tdi << (ir_length-1); + break; + default: + break; + } + _state = next[_state][_tms]; + + } else { + // Negative clock edge. TDO is updated. + switch (_state) { + case RUN_TEST_IDLE: + if (rti_remaining > 0) + rti_remaining--; + dm->run_test_idle(); + break; + case TEST_LOGIC_RESET: + ir = IR_IDCODE; + break; + case CAPTURE_DR: + capture_dr(); + break; + case SHIFT_DR: + _tdo = dr & 1; + break; + case UPDATE_DR: + update_dr(); + break; + case SHIFT_IR: + _tdo = ir & 1; + break; + default: + break; + } + } + + D(fprintf(stderr, "state=%2d, tdi=%d, tdo=%d, tms=%d, tck=%d, ir=0x%02x, " + "dr=0x%lx\n", + _state, _tdi, _tdo, _tms, _tck, ir, dr)); + + _tck = tck; + _tms = tms; + _tdi = tdi; +} + +void jtag_dtm_t::capture_dr() +{ + switch (ir) { + case IR_IDCODE: + dr = idcode; + dr_length = 32; + break; + case IR_DTMCONTROL: + dr = dtmcontrol; + dr_length = 32; + break; + case IR_DBUS: + if (rti_remaining > 0 || busy_stuck) { + dr = DMI_OP_STATUS_BUSY; + busy_stuck = true; + } else { + dr = dmi; + } + dr_length = abits + 34; + break; + case IR_BYPASS: + dr = bypass; + dr_length = 1; + break; + default: + fprintf(stderr, "Unsupported IR: 0x%x\n", ir); + break; + } + D(fprintf(stderr, "Capture DR; IR=0x%x, DR=0x%lx (%d bits)\n", + ir, dr, dr_length)); +} + +void jtag_dtm_t::update_dr() +{ + D(fprintf(stderr, "Update DR; IR=0x%x, DR=0x%lx (%d bits)\n", + ir, dr, dr_length)); + if (ir == IR_DTMCONTROL) { + if (dr & DTMCONTROL_DMIRESET) + busy_stuck = false; + if (dr & DTMCONTROL_DMIHARDRESET) + reset(); + } else if (ir == IR_BYPASS) { + bypass = dr; + } else if (ir == IR_DBUS && !busy_stuck) { + unsigned op = get_field(dr, DMI_OP); + uint32_t data = get_field(dr, DMI_DATA); + unsigned address = get_field(dr, DMI_ADDRESS); + + dmi = dr; + + bool success = true; + if (op == DMI_OP_READ) { + uint32_t value; + if (dm->dmi_read(address, &value)) { + dmi = set_field(dmi, DMI_DATA, value); + } else { + success = false; + } + } else if (op == DMI_OP_WRITE) { + success = dm->dmi_write(address, data); + } + + if (success) { 
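+      // The op field of dmi doubles as a status code: it reads back as one
+      // of the DMI_OP_STATUS_* values defined above.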
+      dmi = set_field(dmi, DMI_OP, DMI_OP_STATUS_SUCCESS);
+    } else {
+      dmi = set_field(dmi, DMI_OP, DMI_OP_STATUS_FAILED);
+    }
+    D(fprintf(stderr, "dmi=0x%lx\n", dmi));
+
+    rti_remaining = required_rti_cycles;
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/jtag_dtm.h b/vendor/riscv-isa-sim/riscv/jtag_dtm.h
new file mode 100644
index 00000000..23a54be1
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/jtag_dtm.h
@@ -0,0 +1,69 @@
+#ifndef JTAG_DTM_H
+#define JTAG_DTM_H
+
+#include <stdint.h>
+
+class debug_module_t;
+
+typedef enum {
+  TEST_LOGIC_RESET,
+  RUN_TEST_IDLE,
+  SELECT_DR_SCAN,
+  CAPTURE_DR,
+  SHIFT_DR,
+  EXIT1_DR,
+  PAUSE_DR,
+  EXIT2_DR,
+  UPDATE_DR,
+  SELECT_IR_SCAN,
+  CAPTURE_IR,
+  SHIFT_IR,
+  EXIT1_IR,
+  PAUSE_IR,
+  EXIT2_IR,
+  UPDATE_IR
+} jtag_state_t;
+
+class jtag_dtm_t
+{
+  static const unsigned idcode = 0xdeadbeef;
+
+  public:
+    jtag_dtm_t(debug_module_t *dm, unsigned required_rti_cycles);
+    void reset();
+
+    void set_pins(bool tck, bool tms, bool tdi);
+
+    bool tdo() const { return _tdo; }
+
+    jtag_state_t state() const { return _state; }
+
+  private:
+    debug_module_t *dm;
+    // The number of Run-Test/Idle cycles required before a DMI access is
+    // complete.
+    unsigned required_rti_cycles;
+    bool _tck, _tms, _tdi, _tdo;
+    uint32_t ir;
+    const unsigned ir_length = 5;
+    uint64_t dr;
+    unsigned dr_length;
+
+    // abits must come before dtmcontrol so it can easily be used in the
+    // constructor.
+    const unsigned abits = 6;
+    uint32_t dtmcontrol;
+    uint64_t dmi;
+    unsigned bypass;
+    // Number of Run-Test/Idle cycles needed before we call this access
+    // complete.
+    unsigned rti_remaining;
+    bool busy_stuck;
+
+    jtag_state_t _state;
+
+    void capture_dr();
+    void update_dr();
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/log_file.h b/vendor/riscv-isa-sim/riscv/log_file.h
new file mode 100644
index 00000000..d039859d
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/log_file.h
@@ -0,0 +1,37 @@
+// See LICENSE for license details.
+#ifndef _RISCV_LOGFILE_H
+#define _RISCV_LOGFILE_H
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <memory>
+#include <sstream>
+#include <stdexcept>
+
+// Header-only class wrapping a log file. When constructed with an
+// actual path, it opens the named file for writing. When constructed
+// with the null path, it wraps stderr.
+class log_file_t
+{
+public:
+  log_file_t(const char *path)
+    : wrapped_file (nullptr, &fclose)
+  {
+    if (!path)
+      return;
+
+    wrapped_file.reset(fopen(path, "w"));
+    if (! wrapped_file) {
+      std::ostringstream oss;
+      oss << "Failed to open log file at `" << path << "': "
+          << strerror (errno);
+      throw std::runtime_error(oss.str());
+    }
+  }
+
+  FILE *get() { return wrapped_file ? wrapped_file.get() : stderr; }
+
+private:
+  std::unique_ptr<FILE, decltype(&fclose)> wrapped_file;
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/memtracer.h b/vendor/riscv-isa-sim/riscv/memtracer.h
new file mode 100644
index 00000000..72bb3a88
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/memtracer.h
@@ -0,0 +1,56 @@
+// See LICENSE for license details.
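+// Observer interface for physical memory accesses; cache models (e.g. those
+// in cachesim.h) attach via memtracer_list_t::hook below.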
+
+#ifndef _MEMTRACER_H
+#define _MEMTRACER_H
+
+#include <cstdint>
+#include <cstddef>
+#include <vector>
+
+enum access_type {
+  LOAD,
+  STORE,
+  FETCH,
+};
+
+class memtracer_t
+{
+ public:
+  memtracer_t() {}
+  virtual ~memtracer_t() {}
+
+  virtual bool interested_in_range(uint64_t begin, uint64_t end, access_type type) = 0;
+  virtual void trace(uint64_t addr, size_t bytes, access_type type) = 0;
+  virtual void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval) = 0;
+};
+
+class memtracer_list_t : public memtracer_t
+{
+ public:
+  bool empty() { return list.empty(); }
+  bool interested_in_range(uint64_t begin, uint64_t end, access_type type)
+  {
+    for (auto it: list)
+      if (it->interested_in_range(begin, end, type))
+        return true;
+    return false;
+  }
+  void trace(uint64_t addr, size_t bytes, access_type type)
+  {
+    for (auto it: list)
+      it->trace(addr, bytes, type);
+  }
+  void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval)
+  {
+    for (auto it: list)
+      it->clean_invalidate(addr, bytes, clean, inval);
+  }
+  void hook(memtracer_t* h)
+  {
+    list.push_back(h);
+  }
+ private:
+  std::vector<memtracer_t*> list;
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/mmio_plugin.h b/vendor/riscv-isa-sim/riscv/mmio_plugin.h
new file mode 100644
index 00000000..f14470bf
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/mmio_plugin.h
@@ -0,0 +1,91 @@
+#ifndef _RISCV_MMIO_PLUGIN_H
+#define _RISCV_MMIO_PLUGIN_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+typedef uint64_t reg_t;
+
+typedef struct {
+  // Allocate user data for an instance of the plugin. The parameter is a simple
+  // c-string containing arguments used to construct the plugin. It returns a
+  // void* to the allocated data.
+  void* (*alloc)(const char*);
+
+  // Load a memory address of the MMIO plugin. The parameters are the user_data
+  // (void*), memory offset (reg_t), number of bytes to load (size_t), and the
+  // buffer into which the loaded data should be written (uint8_t*). Return true
+  // if the load is successful and false otherwise.
+  bool (*load)(void*, reg_t, size_t, uint8_t*);
+
+  // Store some bytes to a memory address of the MMIO plugin. The parameters are
+  // the user_data (void*), memory offset (reg_t), number of bytes to store
+  // (size_t), and the buffer containing the data to be stored (const uint8_t*).
+  // Return true if the store is successful and false otherwise.
+  bool (*store)(void*, reg_t, size_t, const uint8_t*);
+
+  // Deallocate the data allocated during the call to alloc. The parameter is a
+  // pointer to the user data allocated during the call to alloc.
+  void (*dealloc)(void*);
+} mmio_plugin_t;
+
+// Register an mmio plugin with the application. This should be called by
+// plugins as part of their loading process.
+extern void register_mmio_plugin(const char* name_cstr,
+                                 const mmio_plugin_t* mmio_plugin);
+
+#ifdef __cplusplus
+}
+
+#include <string>
+
+// Wrapper around the C plugin API that makes registering a C++ class with
+// correctly formed constructor, load, and store functions easier. The template
+// type should be the type that implements the MMIO plugin interface. Simply
+// make a global mmio_plugin_registration_t and your plugin should register
+// itself with the application when it is loaded because the
+// mmio_plugin_registration_t constructor will be called.
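+//
+// A minimal sketch of such a plugin (the names here are illustrative, not
+// part of the API):
+//
+//   struct dummy_mmio_t {
+//     dummy_mmio_t(const std::string& args) {}
+//     bool load(reg_t addr, size_t len, uint8_t* bytes) { return false; }
+//     bool store(reg_t addr, size_t len, const uint8_t* bytes) { return true; }
+//   };
+//   static mmio_plugin_registration_t<dummy_mmio_t> dummy_registration("dummy_mmio");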
+template <typename T>
+struct mmio_plugin_registration_t
+{
+  static void* alloc(const char* args)
+  {
+    return reinterpret_cast<void*>(new T(std::string(args)));
+  }
+
+  static bool load(void* self, reg_t addr, size_t len, uint8_t* bytes)
+  {
+    return reinterpret_cast<T*>(self)->load(addr, len, bytes);
+  }
+
+  static bool store(void* self, reg_t addr, size_t len, const uint8_t* bytes)
+  {
+    return reinterpret_cast<T*>(self)->store(addr, len, bytes);
+  }
+
+  static void dealloc(void* self)
+  {
+    delete reinterpret_cast<T*>(self);
+  }
+
+  mmio_plugin_registration_t(const std::string& name)
+  {
+    mmio_plugin_t plugin = {
+      mmio_plugin_registration_t<T>::alloc,
+      mmio_plugin_registration_t<T>::load,
+      mmio_plugin_registration_t<T>::store,
+      mmio_plugin_registration_t<T>::dealloc,
+    };
+
+    register_mmio_plugin(name.c_str(), &plugin);
+  }
+};
+#endif // __cplusplus
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/mmu.cc b/vendor/riscv-isa-sim/riscv/mmu.cc
new file mode 100644
index 00000000..db787a80
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/mmu.cc
@@ -0,0 +1,447 @@
+// See LICENSE for license details.
+
+#include "mmu.h"
+#include "arith.h"
+#include "simif.h"
+#include "processor.h"
+
+mmu_t::mmu_t(simif_t* sim, processor_t* proc)
+ : sim(sim), proc(proc),
+#ifdef RISCV_ENABLE_DUAL_ENDIAN
+  target_big_endian(false),
+#endif
+  check_triggers_fetch(false),
+  check_triggers_load(false),
+  check_triggers_store(false),
+  matched_trigger(NULL)
+{
+  flush_tlb();
+  yield_load_reservation();
+}
+
+mmu_t::~mmu_t()
+{
+}
+
+void mmu_t::flush_icache()
+{
+  for (size_t i = 0; i < ICACHE_ENTRIES; i++)
+    icache[i].tag = -1;
+}
+
+void mmu_t::flush_tlb()
+{
+  memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
+  memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
+  memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
+
+  flush_icache();
+}
+
+static void throw_access_exception(bool virt, reg_t addr, access_type type)
+{
+  switch (type) {
+    case FETCH: throw trap_instruction_access_fault(virt, addr, 0, 0);
+    case LOAD: throw trap_load_access_fault(virt, addr, 0, 0);
+    case STORE: throw trap_store_access_fault(virt, addr, 0, 0);
+    default: abort();
+  }
+}
+
+reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags)
+{
+  if (!proc)
+    return addr;
+
+  bool virt = proc->state.v;
+  bool hlvx = xlate_flags & RISCV_XLATE_VIRT_HLVX;
+  reg_t mode = proc->state.prv;
+  if (type != FETCH) {
+    if (!proc->state.debug_mode && get_field(proc->state.mstatus->read(), MSTATUS_MPRV)) {
+      mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP);
+      if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M)
+        virt = true;
+    }
+    if (xlate_flags & RISCV_XLATE_VIRT) {
+      virt = true;
+      mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP);
+    }
+  }
+
+  reg_t paddr = walk(addr, type, mode, virt, hlvx) | (addr & (PGSIZE-1));
+  if (!pmp_ok(paddr, len, type, mode))
+    throw_access_exception(virt, addr, type);
+  return paddr;
+}
+
+tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
+{
+  reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
+
+  if (auto host_addr = sim->addr_to_mem(paddr)) {
+    return refill_tlb(vaddr, paddr, host_addr, FETCH);
+  } else {
+    if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
+      throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0);
+    tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
+    return entry;
+  }
+}
+
+reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
+{
+  switch (len) {
+    case 1:
+      return bytes[0];
+    case 2:
+      return bytes[0] |
+        (((reg_t) bytes[1]) << 
8); + case 4: + return bytes[0] | + (((reg_t) bytes[1]) << 8) | + (((reg_t) bytes[2]) << 16) | + (((reg_t) bytes[3]) << 24); + case 8: + return bytes[0] | + (((reg_t) bytes[1]) << 8) | + (((reg_t) bytes[2]) << 16) | + (((reg_t) bytes[3]) << 24) | + (((reg_t) bytes[4]) << 32) | + (((reg_t) bytes[5]) << 40) | + (((reg_t) bytes[6]) << 48) | + (((reg_t) bytes[7]) << 56); + } + abort(); +} + +bool mmu_t::mmio_ok(reg_t addr, access_type type) +{ + // Disallow access to debug region when not in debug mode + if (addr >= DEBUG_START && addr <= DEBUG_END && proc && !proc->state.debug_mode) + return false; + + return true; +} + +bool mmu_t::mmio_load(reg_t addr, size_t len, uint8_t* bytes) +{ + if (!mmio_ok(addr, LOAD)) + return false; + + return sim->mmio_load(addr, len, bytes); +} + +bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) +{ + if (!mmio_ok(addr, STORE)) + return false; + + return sim->mmio_store(addr, len, bytes); +} + +void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) +{ + reg_t paddr = translate(addr, len, LOAD, xlate_flags); + + if (auto host_addr = sim->addr_to_mem(paddr)) { + memcpy(bytes, host_addr, len); + if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) + tracer.trace(paddr, len, LOAD); + else if (xlate_flags == 0) + refill_tlb(addr, paddr, host_addr, LOAD); + } else if (!mmio_load(paddr, len, bytes)) { + throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); + } + + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); + if (matched_trigger) + throw *matched_trigger; + } +} + +void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) +{ + reg_t paddr = translate(addr, len, STORE, xlate_flags); + + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, data); + if (matched_trigger) + throw *matched_trigger; + } + + if (actually_store) { + if (auto host_addr = sim->addr_to_mem(paddr)) { + memcpy(host_addr, bytes, len); + if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE)) + tracer.trace(paddr, len, STORE); + else if (xlate_flags == 0) + refill_tlb(addr, paddr, host_addr, STORE); + } else if (!mmio_store(paddr, len, bytes)) { + throw trap_store_access_fault((proc) ? 
proc->state.v : false, addr, 0, 0); + } + } +} + +tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type) +{ + reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES; + reg_t expected_tag = vaddr >> PGSHIFT; + + tlb_entry_t entry = {host_addr - vaddr, paddr - vaddr}; + + if (proc && get_field(proc->state.mstatus->read(), MSTATUS_MPRV)) + return entry; + + if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag) + tlb_load_tag[idx] = -1; + if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag) + tlb_store_tag[idx] = -1; + if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag) + tlb_insn_tag[idx] = -1; + + if ((check_triggers_fetch && type == FETCH) || + (check_triggers_load && type == LOAD) || + (check_triggers_store && type == STORE)) + expected_tag |= TLB_CHECK_TRIGGERS; + + if (pmp_homogeneous(paddr & ~reg_t(PGSIZE - 1), PGSIZE)) { + if (type == FETCH) tlb_insn_tag[idx] = expected_tag; + else if (type == STORE) tlb_store_tag[idx] = expected_tag; + else tlb_load_tag[idx] = expected_tag; + } + + tlb_data[idx] = entry; + return entry; +} + +bool mmu_t::pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode) +{ + if (!proc || proc->n_pmp == 0) + return true; + + bool mseccfg_mml = proc->state.mseccfg->get_mml(); + bool mseccfg_mmwp = proc->state.mseccfg->get_mmwp(); + + for (size_t i = 0; i < proc->n_pmp; i++) { + // Check each 4-byte sector of the access + bool any_match = false; + bool all_match = true; + for (reg_t offset = 0; offset < len; offset += 1 << PMP_SHIFT) { + reg_t cur_addr = addr + offset; + bool match = proc->state.pmpaddr[i]->match4(cur_addr); + any_match |= match; + all_match &= match; + } + + if (any_match) { + // If the PMP matches only a strict subset of the access, fail it + if (!all_match) + return false; + + return proc->state.pmpaddr[i]->access_ok(type, mode); + } + } + return ((mode == PRV_M) && !mseccfg_mmwp && (!mseccfg_mml || ((type == LOAD) || (type == STORE)))); +} + +reg_t mmu_t::pmp_homogeneous(reg_t addr, reg_t len) +{ + if ((addr | len) & (len - 1)) + abort(); + + if (!proc) + return true; + + for (size_t i = 0; i < proc->n_pmp; i++) + if (proc->state.pmpaddr[i]->subset_match(addr, len)) + return false; + + return true; +} + +reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_type, bool virt, bool hlvx) +{ + if (!virt) + return gpa; + + vm_info vm = decode_vm_info(proc->get_const_xlen(), true, 0, proc->get_state()->hgatp->read()); + if (vm.levels == 0) + return gpa; + + int maxgpabits = vm.levels * vm.idxbits + vm.widenbits + PGSHIFT; + reg_t maxgpa = (1ULL << maxgpabits) - 1; + + bool mxr = proc->state.sstatus->readvirt(false) & MSTATUS_MXR; + + reg_t base = vm.ptbase; + if ((gpa & ~maxgpa) == 0) { + for (int i = vm.levels - 1; i >= 0; i--) { + int ptshift = i * vm.idxbits; + int idxbits = (i == (vm.levels - 1)) ? vm.idxbits + vm.widenbits : vm.idxbits; + reg_t idx = (gpa >> (PGSHIFT + ptshift)) & ((reg_t(1) << idxbits) - 1); + + // check that physical address of PTE is legal + auto pte_paddr = base + idx * vm.ptesize; + auto ppte = sim->addr_to_mem(pte_paddr); + if (!ppte || !pmp_ok(pte_paddr, vm.ptesize, LOAD, PRV_S)) { + throw_access_exception(virt, gva, trap_type); + } + + reg_t pte = vm.ptesize == 4 ? 
from_target(*(target_endian<uint32_t>*)ppte) : from_target(*(target_endian<uint64_t>*)ppte);
+      reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT;
+
+      if (pte & PTE_RSVD) {
+        break;
+      } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) {
+        break;
+      } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) {
+        break;
+      } else if (PTE_TABLE(pte)) { // next level of page table
+        if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT))
+          break;
+        base = ppn << PGSHIFT;
+      } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
+        break;
+      } else if (!(pte & PTE_U)) {
+        break;
+      } else if (type == FETCH || hlvx ? !(pte & PTE_X) :
+                 type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
+                 !((pte & PTE_R) && (pte & PTE_W))) {
+        break;
+      } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) {
+        break;
+      } else {
+        reg_t ad = PTE_A | ((type == STORE) * PTE_D);
+#ifdef RISCV_ENABLE_DIRTY
+        // set accessed and possibly dirty bits.
+        if ((pte & ad) != ad) {
+          if (!pmp_ok(pte_paddr, vm.ptesize, STORE, PRV_S))
+            throw_access_exception(virt, gva, trap_type);
+          *(target_endian<uint32_t>*)ppte |= to_target((uint32_t)ad);
+        }
+#else
+        // take exception if access or possibly dirty bit is not set.
+        if ((pte & ad) != ad)
+          break;
+#endif
+        reg_t vpn = gpa >> PGSHIFT;
+        reg_t page_mask = (reg_t(1) << PGSHIFT) - 1;
+
+        int napot_bits = ((pte & PTE_N) ? (ctz(ppn) + 1) : 0);
+        if (((pte & PTE_N) && (ppn == 0 || i != 0)) || (napot_bits != 0 && napot_bits != 4))
+          break;
+
+        reg_t page_base = ((ppn & ~((reg_t(1) << napot_bits) - 1))
+                          | (vpn & ((reg_t(1) << napot_bits) - 1))
+                          | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
+        return page_base | (gpa & page_mask);
+      }
+    }
+  }
+
+  switch (trap_type) {
+    case FETCH: throw trap_instruction_guest_page_fault(gva, gpa >> 2, 0);
+    case LOAD: throw trap_load_guest_page_fault(gva, gpa >> 2, 0);
+    case STORE: throw trap_store_guest_page_fault(gva, gpa >> 2, 0);
+    default: abort();
+  }
+}
+
+reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx)
+{
+  reg_t page_mask = (reg_t(1) << PGSHIFT) - 1;
+  reg_t satp = proc->get_state()->satp->readvirt(virt);
+  vm_info vm = decode_vm_info(proc->get_const_xlen(), false, mode, satp);
+  if (vm.levels == 0)
+    return s2xlate(addr, addr & ((reg_t(2) << (proc->xlen-1))-1), type, type, virt, hlvx) & ~page_mask; // zero-extend from xlen
+
+  bool s_mode = mode == PRV_S;
+  bool sum = proc->state.sstatus->readvirt(virt) & MSTATUS_SUM;
+  bool mxr = (proc->state.sstatus->readvirt(false) | proc->state.sstatus->readvirt(virt)) & MSTATUS_MXR;
+
+  // verify bits xlen-1:va_bits-1 are all equal
+  int va_bits = PGSHIFT + vm.levels * vm.idxbits;
+  reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1;
+  reg_t masked_msbs = (addr >> (va_bits-1)) & mask;
+  if (masked_msbs != 0 && masked_msbs != mask)
+    vm.levels = 0;
+
+  reg_t base = vm.ptbase;
+  for (int i = vm.levels - 1; i >= 0; i--) {
+    int ptshift = i * vm.idxbits;
+    reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1);
+
+    // check that physical address of PTE is legal
+    auto pte_paddr = s2xlate(addr, base + idx * vm.ptesize, LOAD, type, virt, false);
+    auto ppte = sim->addr_to_mem(pte_paddr);
+    if (!ppte || !pmp_ok(pte_paddr, vm.ptesize, LOAD, PRV_S))
+      throw_access_exception(virt, addr, type);
+
+    reg_t pte = vm.ptesize == 4 ? from_target(*(target_endian<uint32_t>*)ppte) : from_target(*(target_endian<uint64_t>*)ppte);
+    reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT;
+
+    if (pte & PTE_RSVD) {
+      break;
+    } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) {
+      break;
+    } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) {
+      break;
+    } else if (PTE_TABLE(pte)) { // next level of page table
+      if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT))
+        break;
+      base = ppn << PGSHIFT;
+    } else if ((pte & PTE_U) ? s_mode && (type == FETCH || !sum) : !s_mode) {
+      break;
+    } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
+      break;
+    } else if (type == FETCH || hlvx ? !(pte & PTE_X) :
+               type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
+               !((pte & PTE_R) && (pte & PTE_W))) {
+      break;
+    } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) {
+      break;
+    } else {
+      reg_t ad = PTE_A | ((type == STORE) * PTE_D);
+#ifdef RISCV_ENABLE_DIRTY
+      // set accessed and possibly dirty bits.
+      if ((pte & ad) != ad) {
+        if (!pmp_ok(pte_paddr, vm.ptesize, STORE, PRV_S))
+          throw_access_exception(virt, addr, type);
+        *(target_endian<uint32_t>*)ppte |= to_target((uint32_t)ad);
+      }
+#else
+      // take exception if access or possibly dirty bit is not set.
+      if ((pte & ad) != ad)
+        break;
+#endif
+      // for superpage or Svnapot NAPOT mappings, make a fake leaf PTE for the TLB's benefit.
+      reg_t vpn = addr >> PGSHIFT;
+
+      int napot_bits = ((pte & PTE_N) ? (ctz(ppn) + 1) : 0);
+      if (((pte & PTE_N) && (ppn == 0 || i != 0)) || (napot_bits != 0 && napot_bits != 4))
+        break;
+
+      reg_t page_base = ((ppn & ~((reg_t(1) << napot_bits) - 1))
+                        | (vpn & ((reg_t(1) << napot_bits) - 1))
+                        | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
+      reg_t phys = page_base | (addr & page_mask);
+      return s2xlate(addr, phys, type, type, virt, hlvx) & ~page_mask;
+    }
+  }
+
+  switch (type) {
+    case FETCH: throw trap_instruction_page_fault(virt, addr, 0, 0);
+    case LOAD: throw trap_load_page_fault(virt, addr, 0, 0);
+    case STORE: throw trap_store_page_fault(virt, addr, 0, 0);
+    default: abort();
+  }
+}
+
+void mmu_t::register_memtracer(memtracer_t* t)
+{
+  flush_tlb();
+  tracer.hook(t);
+}
diff --git a/vendor/riscv-isa-sim/riscv/mmu.h b/vendor/riscv-isa-sim/riscv/mmu.h
new file mode 100644
index 00000000..8964e294
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/mmu.h
@@ -0,0 +1,559 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_MMU_H
+#define _RISCV_MMU_H
+
+#include "decode.h"
+#include "trap.h"
+#include "common.h"
+#include "config.h"
+#include "simif.h"
+#include "processor.h"
+#include "memtracer.h"
+#include "byteorder.h"
+#include "triggers.h"
+#include <map>
+#include <vector>
+
+// virtual memory configuration
+#define PGSHIFT 12
+const reg_t PGSIZE = 1 << PGSHIFT;
+const reg_t PGMASK = ~(PGSIZE-1);
+#define MAX_PADDR_BITS 56 // imposed by Sv39 / Sv48
+
+struct insn_fetch_t
+{
+  insn_func_t func;
+  insn_t insn;
+};
+
+struct icache_entry_t {
+  reg_t tag;
+  struct icache_entry_t* next;
+  insn_fetch_t data;
+};
+
+struct tlb_entry_t {
+  char* host_offset;
+  reg_t target_offset;
+};
+
+// this class implements a processor's port into the virtual memory system.
+// an MMU and instruction cache are maintained for simulator performance.
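+// Aligned loads and stores first probe the direct-mapped TLB below; on a
+// miss they take the slow path, which performs translation, PMP checks, and
+// MMIO dispatch before refilling the TLB.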
+class mmu_t +{ +private: + std::map alloc_cache; + std::vector> addr_tbl; +public: + mmu_t(simif_t* sim, processor_t* proc); + ~mmu_t(); + +#define RISCV_XLATE_VIRT (1U << 0) +#define RISCV_XLATE_VIRT_HLVX (1U << 1) + + inline reg_t misaligned_load(reg_t addr, size_t size, uint32_t xlate_flags) + { +#ifdef RISCV_ENABLE_MISALIGNED + reg_t res = 0; + for (size_t i = 0; i < size; i++) + res += (reg_t)load_uint8(addr + (target_big_endian? size-1-i : i)) << (i * 8); + return res; +#else + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); + throw trap_load_address_misaligned(gva, addr, 0, 0); +#endif + } + + inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true) + { +#ifdef RISCV_ENABLE_MISALIGNED + for (size_t i = 0; i < size; i++) + store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8), actually_store); +#else + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); + throw trap_store_address_misaligned(gva, addr, 0, 0); +#endif + } + +#ifndef RISCV_ENABLE_COMMITLOG +# define READ_MEM(addr, size) ({}) +#else +# define READ_MEM(addr, size) \ + proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size)); +#endif + + // template for functions that load an aligned value from memory + #define load_func(type, prefix, xlate_flags) \ + inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \ + if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (require_alignment) load_reserved_address_misaligned(addr); \ + else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \ + } \ + reg_t vpn = addr >> PGSHIFT; \ + size_t size = sizeof(type##_t); \ + if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \ + if (proc) READ_MEM(addr, size); \ + return from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ + } \ + if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ + type##_t data = from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ + if (!matched_trigger) { \ + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); \ + if (matched_trigger) \ + throw *matched_trigger; \ + } \ + if (proc) READ_MEM(addr, size); \ + return data; \ + } \ + target_endian res; \ + load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \ + if (proc) READ_MEM(addr, size); \ + return from_target(res); \ + } + + // load value from memory at aligned address; zero extend to register width + load_func(uint8, load, 0) + load_func(uint16, load, 0) + load_func(uint32, load, 0) + load_func(uint64, load, 0) + + // load value from guest memory at aligned address; zero extend to register width + load_func(uint8, guest_load, RISCV_XLATE_VIRT) + load_func(uint16, guest_load, RISCV_XLATE_VIRT) + load_func(uint32, guest_load, RISCV_XLATE_VIRT) + load_func(uint64, guest_load, RISCV_XLATE_VIRT) + load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) + load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) + + // load value from memory at aligned address; sign extend to register width + load_func(int8, load, 0) + load_func(int16, load, 0) + load_func(int32, load, 0) + load_func(int64, load, 0) + + // load value from guest memory at aligned address; sign extend to register width + load_func(int8, guest_load, RISCV_XLATE_VIRT) + load_func(int16, guest_load, RISCV_XLATE_VIRT) + 
load_func(int32, guest_load, RISCV_XLATE_VIRT) + load_func(int64, guest_load, RISCV_XLATE_VIRT) + +#ifndef RISCV_ENABLE_COMMITLOG +# define WRITE_MEM(addr, value, size) ({}) +#else +# define WRITE_MEM(addr, val, size) \ + proc->state.log_mem_write.push_back(std::make_tuple(addr, val, size)); +#endif + + // template for functions that store an aligned value to memory + #define store_func(type, prefix, xlate_flags) \ + void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ + if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (require_alignment) store_conditional_address_misaligned(addr); \ + else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \ + } \ + reg_t vpn = addr >> PGSHIFT; \ + size_t size = sizeof(type##_t); \ + if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \ + if (actually_store) { \ + if (proc) WRITE_MEM(addr, val, size); \ + *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \ + } \ + } \ + else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ + if (actually_store) { \ + if (!matched_trigger) { \ + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val); \ + if (matched_trigger) \ + throw *matched_trigger; \ + } \ + if (proc) WRITE_MEM(addr, val, size); \ + *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \ + } \ + } \ + else { \ + target_endian target_val = to_target(val); \ + store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \ + if (actually_store && proc) WRITE_MEM(addr, val, size); \ + } \ + } + + // AMO/Zicbom faults should be reported as store faults + #define convert_load_traps_to_store_traps(BODY) \ + try { \ + BODY \ + } catch (trap_load_address_misaligned& t) { \ + /* Misaligned fault will not be triggered by Zicbom */ \ + throw trap_store_address_misaligned(t.has_gva(), t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } catch (trap_load_page_fault& t) { \ + throw trap_store_page_fault(t.has_gva(), t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } catch (trap_load_access_fault& t) { \ + throw trap_store_access_fault(t.has_gva(), t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } catch (trap_load_guest_page_fault& t) { \ + throw trap_store_guest_page_fault(t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } + + // template for functions that perform an atomic memory operation + #define amo_func(type) \ + template \ + type##_t amo_##type(reg_t addr, op f) { \ + convert_load_traps_to_store_traps({ \ + store_##type(addr, 0, false, true); \ + auto lhs = load_##type(addr, true); \ + store_##type(addr, f(lhs)); \ + return lhs; \ + }) \ + } + + void store_float128(reg_t addr, float128_t val) + { +#ifndef RISCV_ENABLE_MISALIGNED + if (unlikely(addr & (sizeof(float128_t)-1))) + throw trap_store_address_misaligned((proc) ? proc->state.v : false, addr, 0, 0); +#endif + store_uint64(addr, val.v[0]); + store_uint64(addr + 8, val.v[1]); + } + + float128_t load_float128(reg_t addr) + { +#ifndef RISCV_ENABLE_MISALIGNED + if (unlikely(addr & (sizeof(float128_t)-1))) + throw trap_load_address_misaligned((proc) ? 
proc->state.v : false, addr, 0, 0); +#endif + return (float128_t){load_uint64(addr), load_uint64(addr + 8)}; + } + + // store value to memory at aligned address + store_func(uint8, store, 0) + store_func(uint16, store, 0) + store_func(uint32, store, 0) + store_func(uint64, store, 0) + + // store value to guest memory at aligned address + store_func(uint8, guest_store, RISCV_XLATE_VIRT) + store_func(uint16, guest_store, RISCV_XLATE_VIRT) + store_func(uint32, guest_store, RISCV_XLATE_VIRT) + store_func(uint64, guest_store, RISCV_XLATE_VIRT) + + // perform an atomic memory operation at an aligned address + amo_func(uint32) + amo_func(uint64) + + void cbo_zero(reg_t addr) { + auto base = addr & ~(blocksz - 1); + for (size_t offset = 0; offset < blocksz; offset += 1) + store_uint8(base + offset, 0); + } + + void clean_inval(reg_t addr, bool clean, bool inval) { + convert_load_traps_to_store_traps({ + reg_t paddr = addr & ~(blocksz - 1); + paddr = translate(paddr, blocksz, LOAD, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) { + if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) + tracer.clean_invalidate(paddr, blocksz, clean, inval); + } else { + throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0); + } + }) + } + + inline void yield_load_reservation() + { + load_reservation_address = (reg_t)-1; + } + + inline void acquire_load_reservation(reg_t vaddr) + { + reg_t paddr = translate(vaddr, 1, LOAD, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) + load_reservation_address = refill_tlb(vaddr, paddr, host_addr, LOAD).target_offset + vaddr; + else + throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space + } + + inline void load_reserved_address_misaligned(reg_t vaddr) + { + bool gva = proc ? proc->state.v : false; +#ifdef RISCV_ENABLE_MISALIGNED + throw trap_load_access_fault(gva, vaddr, 0, 0); +#else + throw trap_load_address_misaligned(gva, vaddr, 0, 0); +#endif + } + + inline void store_conditional_address_misaligned(reg_t vaddr) + { + bool gva = proc ? proc->state.v : false; +#ifdef RISCV_ENABLE_MISALIGNED + throw trap_store_access_fault(gva, vaddr, 0, 0); +#else + throw trap_store_address_misaligned(gva, vaddr, 0, 0); +#endif + } + + inline bool check_load_reservation(reg_t vaddr, size_t size) + { + if (vaddr & (size-1)) + store_conditional_address_misaligned(vaddr); + + reg_t paddr = translate(vaddr, 1, STORE, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) + return load_reservation_address == refill_tlb(vaddr, paddr, host_addr, STORE).target_offset + vaddr; + else + throw trap_store_access_fault((proc) ? 
proc->state.v : false, vaddr, 0, 0); // disallow SC to I/O space + } + + static const reg_t ICACHE_ENTRIES = 1024; + + inline size_t icache_index(reg_t addr) + { + return (addr / PC_ALIGN) % ICACHE_ENTRIES; + } + + inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry) + { + auto tlb_entry = translate_insn_addr(addr); + insn_bits_t insn = from_le(*(uint16_t*)(tlb_entry.host_offset + addr)); + int length = insn_length(insn); + + if (likely(length == 4)) { + insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + } else if (length == 2) { + insn = (int16_t)insn; + } else if (length == 6) { + insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 4)) << 32; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + } else { + static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t"); + insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 6)) << 48; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + } + + insn_fetch_t fetch = {proc->decode_insn(insn), insn}; + entry->tag = addr; + entry->next = &icache[icache_index(addr + length)]; + entry->data = fetch; + + reg_t paddr = tlb_entry.target_offset + addr;; + if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) { + entry->tag = -1; + tracer.trace(paddr, length, FETCH); + } + return entry; + } + + inline icache_entry_t* access_icache(reg_t addr) + { + icache_entry_t* entry = &icache[icache_index(addr)]; + if (likely(entry->tag == addr)) + return entry; + return refill_icache(addr, entry); + } + + inline insn_fetch_t load_insn(reg_t addr) + { + icache_entry_t entry; + return refill_icache(addr, &entry)->data; + } + + void flush_tlb(); + void flush_icache(); + + void register_memtracer(memtracer_t*); + + int is_dirty_enabled() + { +#ifdef RISCV_ENABLE_DIRTY + return 1; +#else + return 0; +#endif + } + + int is_misaligned_enabled() + { +#ifdef RISCV_ENABLE_MISALIGNED + return 1; +#else + return 0; +#endif + } + + void set_target_big_endian(bool enable) + { +#ifdef RISCV_ENABLE_DUAL_ENDIAN + target_big_endian = enable; +#else + assert(enable == false); +#endif + } + + bool is_target_big_endian() + { + return target_big_endian; + } + + template inline T from_target(target_endian n) const + { + return target_big_endian? n.from_be() : n.from_le(); + } + + template inline target_endian to_target(T n) const + { + return target_big_endian? target_endian::to_be(n) : target_endian::to_le(n); + } + + void set_cache_blocksz(uint64_t size) + { + blocksz = size; + } + +private: + simif_t* sim; + processor_t* proc; + memtracer_list_t tracer; + reg_t load_reservation_address; + uint16_t fetch_temp; + uint64_t blocksz; + + // implement an instruction cache for simulator performance + icache_entry_t icache[ICACHE_ENTRIES]; + + // implement a TLB for simulator performance + static const reg_t TLB_ENTRIES = 256; + // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a + // trigger match before completing an access. 
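+  // TLB tags are virtual page numbers, which can never reach bit 63, so that
+  // bit is free to carry the trigger flag.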
+ static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63; + tlb_entry_t tlb_data[TLB_ENTRIES]; + reg_t tlb_insn_tag[TLB_ENTRIES]; + reg_t tlb_load_tag[TLB_ENTRIES]; + reg_t tlb_store_tag[TLB_ENTRIES]; + + // finish translation on a TLB miss and update the TLB + tlb_entry_t refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type); + const char* fill_from_mmio(reg_t vaddr, reg_t paddr); + + // perform a stage2 translation for a given guest address + reg_t s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_type, bool virt, bool hlvx); + + // perform a page table walk for a given VA; set referenced/dirty bits + reg_t walk(reg_t addr, access_type type, reg_t prv, bool virt, bool hlvx); + + // handle uncommon cases: TLB misses, page faults, MMIO + tlb_entry_t fetch_slow_path(reg_t addr); + void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); + void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); + bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); + bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); + bool mmio_ok(reg_t addr, access_type type); + reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags); + + // ITLB lookup + inline tlb_entry_t translate_insn_addr(reg_t addr) { + reg_t vpn = addr >> PGSHIFT; + if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn)) + return tlb_data[vpn % TLB_ENTRIES]; + tlb_entry_t result; + if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { + result = fetch_slow_path(addr); + } else { + result = tlb_data[vpn % TLB_ENTRIES]; + } + if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { + target_endian* ptr = (target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr)); + if (match != triggers::MATCH_NONE) { + throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); + } + } + return result; + } + + inline const uint16_t* translate_insn_addr_to_host(reg_t addr) { + return (uint16_t*)(translate_insn_addr(addr).host_offset + addr); + } + + inline triggers::matched_t *trigger_exception(triggers::operation_t operation, + reg_t address, reg_t data) + { + if (!proc) { + return NULL; + } + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, operation, address, data); + if (match == triggers::MATCH_NONE) + return NULL; + if (match == triggers::MATCH_FIRE_BEFORE) { + throw triggers::matched_t(operation, address, data, action); + } + return new triggers::matched_t(operation, address, data, action); + } + + reg_t pmp_homogeneous(reg_t addr, reg_t len); + bool pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode); + +#ifdef RISCV_ENABLE_DUAL_ENDIAN + bool target_big_endian; +#else + static const bool target_big_endian = false; +#endif + bool check_triggers_fetch; + bool check_triggers_load; + bool check_triggers_store; + // The exception describing a matched trigger, or NULL. 
+ triggers::matched_t *matched_trigger; + + friend class processor_t; +}; + +struct vm_info { + int levels; + int idxbits; + int widenbits; + int ptesize; + reg_t ptbase; +}; + +inline vm_info decode_vm_info(int xlen, bool stage2, reg_t prv, reg_t satp) +{ + if (prv == PRV_M) { + return {0, 0, 0, 0, 0}; + } else if (!stage2 && prv <= PRV_S && xlen == 32) { + switch (get_field(satp, SATP32_MODE)) { + case SATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case SATP_MODE_SV32: return {2, 10, 0, 4, (satp & SATP32_PPN) << PGSHIFT}; + default: abort(); + } + } else if (!stage2 && prv <= PRV_S && xlen == 64) { + switch (get_field(satp, SATP64_MODE)) { + case SATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case SATP_MODE_SV39: return {3, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + case SATP_MODE_SV48: return {4, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + case SATP_MODE_SV57: return {5, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + case SATP_MODE_SV64: return {6, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + default: abort(); + } + } else if (stage2 && xlen == 32) { + switch (get_field(satp, HGATP32_MODE)) { + case HGATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case HGATP_MODE_SV32X4: return {2, 10, 2, 4, (satp & HGATP32_PPN) << PGSHIFT}; + default: abort(); + } + } else if (stage2 && xlen == 64) { + switch (get_field(satp, HGATP64_MODE)) { + case HGATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case HGATP_MODE_SV39X4: return {3, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT}; + case HGATP_MODE_SV48X4: return {4, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT}; + case HGATP_MODE_SV57X4: return {5, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT}; + default: abort(); + } + } else { + abort(); + } +} + +#endif diff --git a/vendor/riscv-isa-sim/riscv/opcodes.h b/vendor/riscv-isa-sim/riscv/opcodes.h new file mode 100644 index 00000000..065934a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/opcodes.h @@ -0,0 +1,249 @@ +#include "encoding.h" + +#define ZERO 0 +#define T0 5 +#define S0 8 +#define S1 9 + +static uint32_t bits(uint32_t value, unsigned int hi, unsigned int lo) { + return (value >> lo) & ((1 << (hi+1-lo)) - 1); +} + +static uint32_t bit(uint32_t value, unsigned int b) { + return (value >> b) & 1; +} + +static uint32_t jal(unsigned int rd, uint32_t imm) __attribute__ ((unused)); +static uint32_t jal(unsigned int rd, uint32_t imm) { + return (bit(imm, 20) << 31) | + (bits(imm, 10, 1) << 21) | + (bit(imm, 11) << 20) | + (bits(imm, 19, 12) << 12) | + (rd << 7) | + MATCH_JAL; +} + +static uint32_t csrsi(unsigned int csr, uint16_t imm) __attribute__ ((unused)); +static uint32_t csrsi(unsigned int csr, uint16_t imm) { + return (csr << 20) | + (bits(imm, 4, 0) << 15) | + MATCH_CSRRSI; +} + +static uint32_t sw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sw(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_SW; +} + +static uint32_t sd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sd(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_SD; +} + +static uint32_t sh(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sh(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + 
MATCH_SH; +} + +static uint32_t sb(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sb(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_SB; +} + +static uint32_t ld(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t ld(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LD; +} + +static uint32_t lw(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t lw(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LW; +} + +static uint32_t lh(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t lh(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LH; +} + +static uint32_t lb(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t lb(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LB; +} + +static uint32_t csrw(unsigned int source, unsigned int csr) __attribute__ ((unused)); +static uint32_t csrw(unsigned int source, unsigned int csr) { + return (csr << 20) | (source << 15) | MATCH_CSRRW; +} + +static uint32_t addi(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused)); +static uint32_t addi(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 11, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_ADDI; +} + +static uint32_t csrr(unsigned int rd, unsigned int csr) __attribute__ ((unused)); +static uint32_t csrr(unsigned int rd, unsigned int csr) { + return (csr << 20) | (rd << 7) | MATCH_CSRRS; +} + +static uint32_t csrrs(unsigned int rd, unsigned int rs1, unsigned int csr) __attribute__ ((unused)); +static uint32_t csrrs(unsigned int rd, unsigned int rs1, unsigned int csr) { + return (csr << 20) | (rs1 << 15) | (rd << 7) | MATCH_CSRRS; +} + +static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (bits(src, 4, 0) << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_FSW; +} + +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (bits(src, 4, 0) << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_FSD; +} + +static uint32_t flw(unsigned int dest, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t flw(unsigned int dest, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(dest, 4, 0) << 7) | + MATCH_FLW; +} + +static uint32_t fld(unsigned int dest, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fld(unsigned int dest, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(dest, 4, 0) << 7) | + MATCH_FLD; +} + +static uint32_t ebreak(void) __attribute__ 
((unused)); +static uint32_t ebreak(void) { return MATCH_EBREAK; } +static uint32_t ebreak_c(void) __attribute__ ((unused)); +static uint32_t ebreak_c(void) { return MATCH_C_EBREAK; } + +static uint32_t dret(void) __attribute__ ((unused)); +static uint32_t dret(void) { return MATCH_DRET; } + +static uint32_t fence_i(void) __attribute__ ((unused)); +static uint32_t fence_i(void) +{ + return MATCH_FENCE_I; +} + +static uint32_t lui(unsigned int dest, uint32_t imm) __attribute__ ((unused)); +static uint32_t lui(unsigned int dest, uint32_t imm) +{ + return (bits(imm, 19, 0) << 12) | + (dest << 7) | + MATCH_LUI; +} + +/* +static uint32_t csrci(unsigned int csr, uint16_t imm) __attribute__ ((unused)); +static uint32_t csrci(unsigned int csr, uint16_t imm) { + return (csr << 20) | + (bits(imm, 4, 0) << 15) | + MATCH_CSRRCI; +} + +static uint32_t li(unsigned int dest, uint16_t imm) __attribute__ ((unused)); +static uint32_t li(unsigned int dest, uint16_t imm) +{ + return addi(dest, 0, imm); +} + +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (bits(src, 4, 0) << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_FSD; +} + +static uint32_t ori(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused)); +static uint32_t ori(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 11, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_ORI; +} + +static uint32_t nop(void) __attribute__ ((unused)); +static uint32_t nop(void) +{ + return addi(0, 0, 0); +} +*/ + +static uint32_t xori(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused)); +static uint32_t xori(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 11, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_XORI; +} + +static uint32_t srli(unsigned int dest, unsigned int src, uint8_t shamt) __attribute__ ((unused)); +static uint32_t srli(unsigned int dest, unsigned int src, uint8_t shamt) +{ + return (bits(shamt, 4, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_SRLI; +} diff --git a/vendor/riscv-isa-sim/riscv/overlap_list.h b/vendor/riscv-isa-sim/riscv/overlap_list.h new file mode 100644 index 00000000..2bc7f42d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/overlap_list.h @@ -0,0 +1,8 @@ +DECLARE_OVERLAP_INSN(c_fsdsp, 'C') +DECLARE_OVERLAP_INSN(c_fsdsp, 'D') +DECLARE_OVERLAP_INSN(c_fld, 'C') +DECLARE_OVERLAP_INSN(c_fld, 'D') +DECLARE_OVERLAP_INSN(c_fldsp, 'C') +DECLARE_OVERLAP_INSN(c_fldsp, 'D') +DECLARE_OVERLAP_INSN(c_fsd, 'C') +DECLARE_OVERLAP_INSN(c_fsd, 'D') diff --git a/vendor/riscv-isa-sim/riscv/platform.h b/vendor/riscv-isa-sim/riscv/platform.h new file mode 100644 index 00000000..6618d44e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/platform.h @@ -0,0 +1,11 @@ +// See LICENSE for license details. +#ifndef _RISCV_PLATFORM_H +#define _RISCV_PLATFORM_H + +#define DEFAULT_RSTVEC 0x00001000 +#define CLINT_BASE 0x02000000 +#define CLINT_SIZE 0x000c0000 +#define EXT_IO_BASE 0x40000000 +#define DRAM_BASE 0x80000000 + +#endif diff --git a/vendor/riscv-isa-sim/riscv/processor.cc b/vendor/riscv-isa-sim/riscv/processor.cc new file mode 100644 index 00000000..9ce9287c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/processor.cc @@ -0,0 +1,1028 @@ +// See LICENSE for license details. 
+
+#include "arith.h"
+#include "processor.h"
+#include "extension.h"
+#include "common.h"
+#include "config.h"
+#include "simif.h"
+#include "mmu.h"
+#include "disasm.h"
+#include "platform.h"
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cinttypes>
+#include <cmath>
+#include <cstdlib>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#undef STATE
+#define STATE state
+
+processor_t::processor_t(const isa_parser_t *isa, const char* varch,
+                         simif_t* sim, uint32_t id, bool halt_on_reset,
+                         FILE* log_file, std::ostream& sout_)
+  : debug(false), halt_request(HR_NONE), isa(isa), sim(sim), id(id), xlen(0),
+    histogram_enabled(false), log_commits_enabled(false),
+    log_file(log_file), sout_(sout_.rdbuf()), halt_on_reset(halt_on_reset),
+    impl_table(256, false), last_pc(1), executions(1), TM(4)
+{
+  VU.p = this;
+  TM.proc = this;
+
+#ifndef __SIZEOF_INT128__
+  if (extension_enabled('V')) {
+    fprintf(stderr, "V extension is not supported on platforms without __int128 type\n");
+    abort();
+  }
+#endif
+
+  parse_varch_string(varch);
+
+  register_base_instructions();
+  mmu = new mmu_t(sim, this);
+
+  disassembler = new disassembler_t(isa);
+  for (auto e : isa->get_extensions())
+    register_extension(e.second);
+
+  set_pmp_granularity(1 << PMP_SHIFT);
+  set_pmp_num(state.max_pmp);
+
+  if (isa->get_max_xlen() == 32)
+    set_mmu_capability(IMPL_MMU_SV32);
+  else if (isa->get_max_xlen() == 64)
+    set_mmu_capability(IMPL_MMU_SV48);
+
+  set_impl(IMPL_MMU_ASID, true);
+  set_impl(IMPL_MMU_VMID, true);
+
+  reset();
+}
+
+processor_t::~processor_t()
+{
+#ifdef RISCV_ENABLE_HISTOGRAM
+  if (histogram_enabled)
+  {
+    fprintf(stderr, "PC Histogram size:%zu\n", pc_histogram.size());
+    for (auto it : pc_histogram)
+      fprintf(stderr, "%0" PRIx64 " %" PRIu64 "\n", it.first, it.second);
+  }
+#endif
+
+  delete mmu;
+  delete disassembler;
+}
+
+static void bad_option_string(const char *option, const char *value,
+                              const char *msg)
+{
+  fprintf(stderr, "error: bad %s option '%s'. %s\n", option, value, msg);
+  abort();
+}
+
+static void bad_varch_string(const char* varch, const char *msg)
+{
+  bad_option_string("--varch", varch, msg);
+}
+
+static std::string get_string_token(std::string str, const char delimiter, size_t& pos)
+{
+  size_t _pos = pos;
+  while (pos < str.length() && str[pos] != delimiter) ++pos;
+  return str.substr(_pos, pos - _pos);
+}
+
+static int get_int_token(std::string str, const char delimiter, size_t& pos)
+{
+  size_t _pos = pos;
+  while (pos < str.length() && str[pos] != delimiter) {
+    if (!isdigit(str[pos]))
+      bad_varch_string(str.c_str(), "Unsupported value"); // An integer is expected
+    ++pos;
+  }
+  return (pos == _pos) ?
0 : stoi(str.substr(_pos, pos - _pos)); +} + +static bool check_pow2(int val) +{ + return ((val & (val - 1))) == 0; +} + +static std::string strtolower(const char* str) +{ + std::string res; + for (const char *r = str; *r; r++) + res += std::tolower(*r); + return res; +} + +void processor_t::parse_varch_string(const char* s) +{ + std::string str = strtolower(s); + size_t pos = 0; + size_t len = str.length(); + int vlen = 0; + int elen = 0; + int vstart_alu = 0; + + while (pos < len) { + std::string attr = get_string_token(str, ':', pos); + + ++pos; + + if (attr == "vlen") + vlen = get_int_token(str, ',', pos); + else if (attr == "elen") + elen = get_int_token(str, ',', pos); + else if (attr == "vstartalu") + vstart_alu = get_int_token(str, ',', pos); + else + bad_varch_string(s, "Unsupported token"); + + ++pos; + } + + // The integer should be the power of 2 + if (!check_pow2(vlen) || !check_pow2(elen)) { + bad_varch_string(s, "The integer value should be the power of 2"); + } + + /* Vector spec requirements. */ + if (vlen < elen) + bad_varch_string(s, "vlen must be >= elen"); + + /* spike requirements. */ + if (vlen > 4096) + bad_varch_string(s, "vlen must be <= 4096"); + + VU.VLEN = vlen; + VU.ELEN = elen; + VU.vlenb = vlen / 8; + VU.vstart_alu = vstart_alu; +} + +static int xlen_to_uxl(int xlen) +{ + if (xlen == 32) + return 1; + if (xlen == 64) + return 2; + abort(); +} + +void state_t::reset(processor_t* const proc, reg_t max_isa) +{ + pc = DEFAULT_RSTVEC; + XPR.reset(); + FPR.reset(); + + // This assumes xlen is always max_xlen, which is true today (see + // mstatus_csr_t::unlogged_write()): + auto xlen = proc->get_isa().get_max_xlen(); + + prv = PRV_M; + v = false; + csrmap[CSR_MISA] = misa = std::make_shared(proc, CSR_MISA, max_isa); + csrmap[CSR_MSTATUS] = mstatus = std::make_shared(proc, CSR_MSTATUS); + if (xlen == 32) csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatus); + csrmap[CSR_MEPC] = mepc = std::make_shared(proc, CSR_MEPC); + csrmap[CSR_MTVAL] = mtval = std::make_shared(proc, CSR_MTVAL, 0); + csrmap[CSR_MSCRATCH] = std::make_shared(proc, CSR_MSCRATCH, 0); + csrmap[CSR_MTVEC] = mtvec = std::make_shared(proc, CSR_MTVEC); + csrmap[CSR_MCAUSE] = mcause = std::make_shared(proc, CSR_MCAUSE); + csrmap[CSR_MINSTRET] = minstret = std::make_shared(proc, CSR_MINSTRET); + csrmap[CSR_MCYCLE] = mcycle = std::make_shared(proc, CSR_MCYCLE); + if (proc->extension_enabled_const(EXT_ZICNTR)) { + csrmap[CSR_INSTRET] = std::make_shared(proc, CSR_INSTRET, minstret); + csrmap[CSR_CYCLE] = std::make_shared(proc, CSR_CYCLE, mcycle); + } + if (xlen == 32) { + counter_top_csr_t_p minstreth, mcycleh; + csrmap[CSR_MINSTRETH] = minstreth = std::make_shared(proc, CSR_MINSTRETH, minstret); + csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared(proc, CSR_MCYCLEH, mcycle); + if (proc->extension_enabled_const(EXT_ZICNTR)) { + csrmap[CSR_INSTRETH] = std::make_shared(proc, CSR_INSTRETH, minstreth); + csrmap[CSR_CYCLEH] = std::make_shared(proc, CSR_CYCLEH, mcycleh); + } + } + for (reg_t i = 3; i <= 31; ++i) { + const reg_t which_mevent = CSR_MHPMEVENT3 + i - 3; + const reg_t which_mcounter = CSR_MHPMCOUNTER3 + i - 3; + const reg_t which_mcounterh = CSR_MHPMCOUNTER3H + i - 3; + const reg_t which_counter = CSR_HPMCOUNTER3 + i - 3; + const reg_t which_counterh = CSR_HPMCOUNTER3H + i - 3; + auto mevent = std::make_shared(proc, which_mevent, 0); + auto mcounter = std::make_shared(proc, which_mcounter, 0); + csrmap[which_mevent] = mevent; + csrmap[which_mcounter] = mcounter; + + if 
(proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) { + auto counter = std::make_shared(proc, which_counter, mcounter); + csrmap[which_counter] = counter; + } + if (xlen == 32) { + auto mcounterh = std::make_shared(proc, which_mcounterh, 0); + csrmap[which_mcounterh] = mcounterh; + if (proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) { + auto counterh = std::make_shared(proc, which_counterh, mcounterh); + csrmap[which_counterh] = counterh; + } + } + } + csrmap[CSR_MCOUNTINHIBIT] = std::make_shared(proc, CSR_MCOUNTINHIBIT, 0); + csrmap[CSR_MIE] = mie = std::make_shared(proc, CSR_MIE); + csrmap[CSR_MIP] = mip = std::make_shared(proc, CSR_MIP); + auto sip_sie_accr = std::make_shared( + this, + ~MIP_HS_MASK, // read_mask + MIP_SSIP, // ip_write_mask + ~MIP_HS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::MIDELEG, + 0 // shiftamt + ); + + auto hip_hie_accr = std::make_shared( + this, + MIP_HS_MASK, // read_mask + MIP_VSSIP, // ip_write_mask + MIP_HS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::MIDELEG, + 0 // shiftamt + ); + + auto hvip_accr = std::make_shared( + this, + MIP_VS_MASK, // read_mask + MIP_VS_MASK, // ip_write_mask + MIP_VS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::NONE, + 0 // shiftamt + ); + + auto vsip_vsie_accr = std::make_shared( + this, + MIP_VS_MASK, // read_mask + MIP_VSSIP, // ip_write_mask + MIP_VS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::HIDELEG, + 1 // shiftamt + ); + + auto nonvirtual_sip = std::make_shared(proc, CSR_SIP, sip_sie_accr); + auto vsip = std::make_shared(proc, CSR_VSIP, vsip_vsie_accr); + csrmap[CSR_VSIP] = vsip; + csrmap[CSR_SIP] = std::make_shared(proc, nonvirtual_sip, vsip); + csrmap[CSR_HIP] = std::make_shared(proc, CSR_HIP, hip_hie_accr); + csrmap[CSR_HVIP] = std::make_shared(proc, CSR_HVIP, hvip_accr); + + auto nonvirtual_sie = std::make_shared(proc, CSR_SIE, sip_sie_accr); + auto vsie = std::make_shared(proc, CSR_VSIE, vsip_vsie_accr); + csrmap[CSR_VSIE] = vsie; + csrmap[CSR_SIE] = std::make_shared(proc, nonvirtual_sie, vsie); + csrmap[CSR_HIE] = std::make_shared(proc, CSR_HIE, hip_hie_accr); + + csrmap[CSR_MEDELEG] = medeleg = std::make_shared(proc, CSR_MEDELEG); + csrmap[CSR_MIDELEG] = mideleg = std::make_shared(proc, CSR_MIDELEG); + const reg_t counteren_mask = 0xffffffffULL; + mcounteren = std::make_shared(proc, CSR_MCOUNTEREN, counteren_mask, 0); + if (proc->extension_enabled_const('U')) csrmap[CSR_MCOUNTEREN] = mcounteren; + csrmap[CSR_SCOUNTEREN] = scounteren = std::make_shared(proc, CSR_SCOUNTEREN, counteren_mask, 0); + auto nonvirtual_sepc = std::make_shared(proc, CSR_SEPC); + csrmap[CSR_VSEPC] = vsepc = std::make_shared(proc, CSR_VSEPC); + csrmap[CSR_SEPC] = sepc = std::make_shared(proc, nonvirtual_sepc, vsepc); + auto nonvirtual_stval = std::make_shared(proc, CSR_STVAL, 0); + csrmap[CSR_VSTVAL] = vstval = std::make_shared(proc, CSR_VSTVAL, 0); + csrmap[CSR_STVAL] = stval = std::make_shared(proc, nonvirtual_stval, vstval); + auto sscratch = std::make_shared(proc, CSR_SSCRATCH, 0); + auto vsscratch = std::make_shared(proc, CSR_VSSCRATCH, 0); + // Note: if max_isa does not include H, we don't really need this virtualized_csr_t at all (though it doesn't hurt): + csrmap[CSR_SSCRATCH] = std::make_shared(proc, sscratch, vsscratch); + csrmap[CSR_VSSCRATCH] = vsscratch; + auto nonvirtual_stvec = std::make_shared(proc, CSR_STVEC); + csrmap[CSR_VSTVEC] = vstvec = std::make_shared(proc, CSR_VSTVEC); + 
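+  // The pairs above follow a common pattern: each HS-level CSR has a
+  // "nonvirtual" backing register plus a VS-mode twin, and the architectural
+  // name is registered as a virtualized_csr_t that forwards to one or the
+  // other depending on state.v. A minimal sketch of the idea (hypothetical
+  // simplification; the real class is defined in csrs.cc):
+  //
+  //   reg_t virtualized_csr_t::read() const noexcept {
+  //     return state->v ? virt_csr->read() : orig_csr->read();
+  //   }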
csrmap[CSR_STVEC] = stvec = std::make_shared(proc, nonvirtual_stvec, vstvec); + auto nonvirtual_satp = std::make_shared(proc, CSR_SATP); + csrmap[CSR_VSATP] = vsatp = std::make_shared(proc, CSR_VSATP); + csrmap[CSR_SATP] = satp = std::make_shared(proc, nonvirtual_satp, vsatp); + auto nonvirtual_scause = std::make_shared(proc, CSR_SCAUSE); + csrmap[CSR_VSCAUSE] = vscause = std::make_shared(proc, CSR_VSCAUSE); + csrmap[CSR_SCAUSE] = scause = std::make_shared(proc, nonvirtual_scause, vscause); + csrmap[CSR_MTVAL2] = mtval2 = std::make_shared(proc, CSR_MTVAL2); + csrmap[CSR_MTINST] = mtinst = std::make_shared(proc, CSR_MTINST); + const reg_t hstatus_init = set_field((reg_t)0, HSTATUS_VSXL, xlen_to_uxl(proc->get_const_xlen())); + const reg_t hstatus_mask = HSTATUS_VTSR | HSTATUS_VTW + | (proc->supports_impl(IMPL_MMU) ? HSTATUS_VTVM : 0) + | HSTATUS_HU | HSTATUS_SPVP | HSTATUS_SPV | HSTATUS_GVA; + csrmap[CSR_HSTATUS] = hstatus = std::make_shared(proc, CSR_HSTATUS, hstatus_mask, hstatus_init); + csrmap[CSR_HGEIE] = std::make_shared(proc, CSR_HGEIE, 0); + csrmap[CSR_HGEIP] = std::make_shared(proc, CSR_HGEIP, 0); + csrmap[CSR_HIDELEG] = hideleg = std::make_shared(proc, CSR_HIDELEG, mideleg); + const reg_t hedeleg_mask = + (1 << CAUSE_MISALIGNED_FETCH) | + (1 << CAUSE_FETCH_ACCESS) | + (1 << CAUSE_ILLEGAL_INSTRUCTION) | + (1 << CAUSE_BREAKPOINT) | + (1 << CAUSE_MISALIGNED_LOAD) | + (1 << CAUSE_LOAD_ACCESS) | + (1 << CAUSE_MISALIGNED_STORE) | + (1 << CAUSE_STORE_ACCESS) | + (1 << CAUSE_USER_ECALL) | + (1 << CAUSE_FETCH_PAGE_FAULT) | + (1 << CAUSE_LOAD_PAGE_FAULT) | + (1 << CAUSE_STORE_PAGE_FAULT); + csrmap[CSR_HEDELEG] = hedeleg = std::make_shared(proc, CSR_HEDELEG, hedeleg_mask, 0); + csrmap[CSR_HCOUNTEREN] = hcounteren = std::make_shared(proc, CSR_HCOUNTEREN, counteren_mask, 0); + csrmap[CSR_HTVAL] = htval = std::make_shared(proc, CSR_HTVAL, 0); + csrmap[CSR_HTINST] = htinst = std::make_shared(proc, CSR_HTINST, 0); + csrmap[CSR_HGATP] = hgatp = std::make_shared(proc, CSR_HGATP); + auto nonvirtual_sstatus = std::make_shared(proc, CSR_SSTATUS, mstatus); + csrmap[CSR_VSSTATUS] = vsstatus = std::make_shared(proc, CSR_VSSTATUS); + csrmap[CSR_SSTATUS] = sstatus = std::make_shared(proc, nonvirtual_sstatus, vsstatus); + + csrmap[CSR_DPC] = dpc = std::make_shared(proc, CSR_DPC); + csrmap[CSR_DSCRATCH0] = std::make_shared(proc, CSR_DSCRATCH0); + csrmap[CSR_DSCRATCH1] = std::make_shared(proc, CSR_DSCRATCH1); + csrmap[CSR_DCSR] = dcsr = std::make_shared(proc, CSR_DCSR); + + csrmap[CSR_TSELECT] = tselect = std::make_shared(proc, CSR_TSELECT); + + csrmap[CSR_TDATA1] = std::make_shared(proc, CSR_TDATA1); + csrmap[CSR_TDATA2] = tdata2 = std::make_shared(proc, CSR_TDATA2); + csrmap[CSR_TDATA3] = std::make_shared(proc, CSR_TDATA3, 0); + debug_mode = false; + single_step = STEP_NONE; + + csrmap[CSR_MSECCFG] = mseccfg = std::make_shared(proc, CSR_MSECCFG); + + for (int i = 0; i < max_pmp; ++i) { + csrmap[CSR_PMPADDR0 + i] = pmpaddr[i] = std::make_shared(proc, CSR_PMPADDR0 + i); + } + for (int i = 0; i < max_pmp; i += xlen / 8) { + reg_t addr = CSR_PMPCFG0 + i / 4; + csrmap[addr] = std::make_shared(proc, addr); + } + + csrmap[CSR_FFLAGS] = fflags = std::make_shared(proc, CSR_FFLAGS, FSR_AEXC >> FSR_AEXC_SHIFT, 0); + csrmap[CSR_FRM] = frm = std::make_shared(proc, CSR_FRM, FSR_RD >> FSR_RD_SHIFT, 0); + assert(FSR_AEXC_SHIFT == 0); // composite_csr_t assumes fflags begins at bit 0 + csrmap[CSR_FCSR] = std::make_shared(proc, CSR_FCSR, frm, fflags, FSR_RD_SHIFT); + + csrmap[CSR_SEED] = std::make_shared(proc, CSR_SEED); + + 
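+  // Every CSR above is keyed by address in state.csrmap; put_csr() and
+  // get_csr() in processor.cc dispatch through this map. A hypothetical
+  // extra scratch CSR could be registered the same way (sketch only;
+  // 0x5c0 is an arbitrary address in the custom range):
+  //
+  //   csrmap[0x5c0] = std::make_shared<basic_csr_t>(proc, 0x5c0, /*init*/ 0);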
csrmap[CSR_MARCHID] = std::make_shared(proc, CSR_MARCHID, 5); + csrmap[CSR_MIMPID] = std::make_shared(proc, CSR_MIMPID, 0); + csrmap[CSR_MVENDORID] = std::make_shared(proc, CSR_MVENDORID, 0); + csrmap[CSR_MHARTID] = std::make_shared(proc, CSR_MHARTID, proc->get_id()); + const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0); + csrmap[CSR_MENVCFG] = menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, 0); + const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0); + csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); + const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0); + csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, 0); + + serialized = false; + +#ifdef RISCV_ENABLE_COMMITLOG + log_reg_write.clear(); + log_mem_read.clear(); + log_mem_write.clear(); + last_inst_priv = 0; + last_inst_xlen = 0; + last_inst_flen = 0; +#endif +} + +void processor_t::vectorUnit_t::reset() +{ + free(reg_file); + VLEN = get_vlen(); + ELEN = get_elen(); + reg_file = malloc(NVPR * vlenb); + memset(reg_file, 0, NVPR * vlenb); + + auto& csrmap = p->get_state()->csrmap; + csrmap[CSR_VXSAT] = vxsat = std::make_shared(p, CSR_VXSAT); + csrmap[CSR_VSTART] = vstart = std::make_shared(p, CSR_VSTART, /*mask*/ VLEN - 1); + csrmap[CSR_VXRM] = vxrm = std::make_shared(p, CSR_VXRM, /*mask*/ 0x3ul); + csrmap[CSR_VL] = vl = std::make_shared(p, CSR_VL, /*mask*/ 0); + csrmap[CSR_VTYPE] = vtype = std::make_shared(p, CSR_VTYPE, /*mask*/ 0); + csrmap[CSR_VLENB] = std::make_shared(p, CSR_VLENB, /*mask*/ 0, /*init*/ vlenb); + assert(VCSR_VXSAT_SHIFT == 0); // composite_csr_t assumes vxsat begins at bit 0 + csrmap[CSR_VCSR] = std::make_shared(p, CSR_VCSR, vxrm, vxsat, VCSR_VXRM_SHIFT); + + vtype->write_raw(0); + set_vl(0, 0, 0, -1); // default to illegal configuration +} + +reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newType) +{ + int new_vlmul = 0; + if (vtype->read() != newType) { + vtype->write_raw(newType); + vsew = 1 << (extract64(newType, 3, 3) + 3); + new_vlmul = int8_t(extract64(newType, 0, 3) << 5) >> 5; + vflmul = new_vlmul >= 0 ? 1 << new_vlmul : 1.0 / (1 << -new_vlmul); + vlmax = (VLEN/vsew) * vflmul; + vta = extract64(newType, 6, 1); + vma = extract64(newType, 7, 1); + + vill = !(vflmul >= 0.125 && vflmul <= 8) + || vsew > std::min(vflmul, 1.0f) * ELEN + || (newType >> 8) != 0; + + if (vill) { + vlmax = 0; + vtype->write_raw(UINT64_MAX << (p->get_xlen() - 1)); + } + } + + // set vl + if (vlmax == 0) { + vl->write_raw(0); + } else if (rd == 0 && rs1 == 0) { + vl->write_raw(vl->read() > vlmax ? vlmax : vl->read()); + } else if (rd != 0 && rs1 == 0) { + vl->write_raw(vlmax); + } else if (rs1 != 0) { + vl->write_raw(reqVL > vlmax ? 
vlmax : reqVL); + } + + vstart->write_raw(0); + setvl_count++; + return vl->read(); +} + +void processor_t::set_debug(bool value) +{ + debug = value; + + for (auto e : custom_extensions) + e.second->set_debug(value); +} + +void processor_t::set_histogram(bool value) +{ + histogram_enabled = value; +#ifndef RISCV_ENABLE_HISTOGRAM + if (value) { + fprintf(stderr, "PC Histogram support has not been properly enabled;"); + fprintf(stderr, " please re-build the riscv-isa-sim project using \"configure --enable-histogram\".\n"); + abort(); + } +#endif +} + +#ifdef RISCV_ENABLE_COMMITLOG +void processor_t::enable_log_commits() +{ + log_commits_enabled = true; +} +#endif + +void processor_t::reset() +{ + xlen = isa->get_max_xlen(); + state.reset(this, isa->get_max_isa()); + state.dcsr->halt = halt_on_reset; + halt_on_reset = false; + VU.reset(); + + if (n_pmp > 0) { + // For backwards compatibility with software that is unaware of PMP, + // initialize PMP to permit unprivileged access to all of memory. + put_csr(CSR_PMPADDR0, ~reg_t(0)); + put_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + } + + for (auto e : custom_extensions) // reset any extensions + e.second->reset(); + + if (sim) + sim->proc_reset(id); +} + +extension_t* processor_t::get_extension() +{ + switch (custom_extensions.size()) { + case 0: return NULL; + case 1: return custom_extensions.begin()->second; + default: + fprintf(stderr, "processor_t::get_extension() is ambiguous when multiple extensions\n"); + fprintf(stderr, "are present!\n"); + abort(); + } +} + +extension_t* processor_t::get_extension(const char* name) +{ + auto it = custom_extensions.find(name); + if (it == custom_extensions.end()) + abort(); + return it->second; +} + +void processor_t::set_pmp_num(reg_t n) +{ + // check the number of pmp is in a reasonable range + if (n > state.max_pmp) { + fprintf(stderr, "error: bad number of pmp regions: '%ld' from the dtb\n", (unsigned long)n); + abort(); + } + n_pmp = n; +} + +void processor_t::set_pmp_granularity(reg_t gran) +{ + // check the pmp granularity is set from dtb(!=0) and is power of 2 + if (gran < (1 << PMP_SHIFT) || (gran & (gran - 1)) != 0) { + fprintf(stderr, "error: bad pmp granularity '%ld' from the dtb\n", (unsigned long)gran); + abort(); + } + + lg_pmp_granularity = ctz(gran); +} + +void processor_t::set_mmu_capability(int cap) +{ + switch (cap) { + case IMPL_MMU_SV32: + set_impl(IMPL_MMU_SV32, true); + set_impl(IMPL_MMU, true); + break; + case IMPL_MMU_SV57: + set_impl(IMPL_MMU_SV57, true); + // Fall through + case IMPL_MMU_SV48: + set_impl(IMPL_MMU_SV48, true); + // Fall through + case IMPL_MMU_SV39: + set_impl(IMPL_MMU_SV39, true); + set_impl(IMPL_MMU, true); + break; + default: + set_impl(IMPL_MMU_SV32, false); + set_impl(IMPL_MMU_SV39, false); + set_impl(IMPL_MMU_SV48, false); + set_impl(IMPL_MMU_SV57, false); + set_impl(IMPL_MMU, false); + break; + } +} + +void processor_t::take_interrupt(reg_t pending_interrupts) +{ + // Do nothing if no pending interrupts + if (!pending_interrupts) { + return; + } + + // M-ints have higher priority over HS-ints and VS-ints + const reg_t mie = get_field(state.mstatus->read(), MSTATUS_MIE); + const reg_t m_enabled = state.prv < PRV_M || (state.prv == PRV_M && mie); + reg_t enabled_interrupts = pending_interrupts & ~state.mideleg->read() & -m_enabled; + if (enabled_interrupts == 0) { + // HS-ints have higher priority over VS-ints + const reg_t deleg_to_hs = state.mideleg->read() & ~state.hideleg->read(); + const reg_t sie = get_field(state.sstatus->read(), 
MSTATUS_SIE);
+    const reg_t hs_enabled = state.v || state.prv < PRV_S || (state.prv == PRV_S && sie);
+    enabled_interrupts = pending_interrupts & deleg_to_hs & -hs_enabled;
+    if (state.v && enabled_interrupts == 0) {
+      // VS-ints have least priority and can only be taken with virt enabled
+      const reg_t deleg_to_vs = state.hideleg->read();
+      const reg_t vs_enabled = state.prv < PRV_S || (state.prv == PRV_S && sie);
+      enabled_interrupts = pending_interrupts & deleg_to_vs & -vs_enabled;
+    }
+  }
+
+  if (!state.debug_mode && enabled_interrupts) {
+    // nonstandard interrupts have highest priority
+    if (enabled_interrupts >> (IRQ_M_EXT + 1))
+      enabled_interrupts = enabled_interrupts >> (IRQ_M_EXT + 1) << (IRQ_M_EXT + 1);
+    // standard interrupt priority is MEI, MSI, MTI, SEI, SSI, STI
+    else if (enabled_interrupts & MIP_MEIP)
+      enabled_interrupts = MIP_MEIP;
+    else if (enabled_interrupts & MIP_MSIP)
+      enabled_interrupts = MIP_MSIP;
+    else if (enabled_interrupts & MIP_MTIP)
+      enabled_interrupts = MIP_MTIP;
+    else if (enabled_interrupts & MIP_SEIP)
+      enabled_interrupts = MIP_SEIP;
+    else if (enabled_interrupts & MIP_SSIP)
+      enabled_interrupts = MIP_SSIP;
+    else if (enabled_interrupts & MIP_STIP)
+      enabled_interrupts = MIP_STIP;
+    else if (enabled_interrupts & MIP_VSEIP)
+      enabled_interrupts = MIP_VSEIP;
+    else if (enabled_interrupts & MIP_VSSIP)
+      enabled_interrupts = MIP_VSSIP;
+    else if (enabled_interrupts & MIP_VSTIP)
+      enabled_interrupts = MIP_VSTIP;
+    else
+      abort();
+
+    throw trap_t(((reg_t)1 << (isa->get_max_xlen() - 1)) | ctz(enabled_interrupts));
+  }
+}
+
+reg_t processor_t::legalize_privilege(reg_t prv)
+{
+  assert(prv <= PRV_M);
+
+  if (!extension_enabled('U'))
+    return PRV_M;
+
+  if (prv == PRV_HS || (prv == PRV_S && !extension_enabled('S')))
+    return PRV_U;
+
+  return prv;
+}
+
+void processor_t::set_privilege(reg_t prv)
+{
+  mmu->flush_tlb();
+  state.prv = legalize_privilege(prv);
+}
+
+void processor_t::set_virt(bool virt)
+{
+  reg_t tmp, mask;
+
+  if (state.prv == PRV_M)
+    return;
+
+  if (state.v != virt) {
+    /*
+     * Ideally we would flush the TLB here, but it isn't needed because
+     * set_virt() is always used in conjunction with set_privilege(), and
+     * set_privilege() flushes the TLB unconditionally.
+     *
+     * The virtualized sstatus register also relies on this TLB flush,
+     * since changing V might change sstatus.MXR and sstatus.SUM.
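+     *
+     * (For example, an sret executed in HS-mode with hstatus.SPV = 1 enters
+     * VS-mode: the caller invokes set_virt(true) together with
+     * set_privilege(), and the latter's unconditional flush covers the V
+     * change as well.)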
+ */ + state.v = virt; + } +} + +void processor_t::enter_debug_mode(uint8_t cause) +{ + state.debug_mode = true; + state.dcsr->write_cause_and_prv(cause, state.prv); + set_privilege(PRV_M); + state.dpc->write(state.pc); + state.pc = DEBUG_ROM_ENTRY; +} + +void processor_t::debug_output_log(std::stringstream *s) +{ + if (log_file == stderr) { + std::ostream out(sout_.rdbuf()); + out << s->str(); // handles command line options -d -s -l + } else { + fputs(s->str().c_str(), log_file); // handles command line option --log + } +} + +void processor_t::take_trap(trap_t& t, reg_t epc) +{ + unsigned max_xlen = isa->get_max_xlen(); + + if (debug) { + std::stringstream s; // first put everything in a string, later send it to output + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": exception " << t.name() << ", epc 0x" + << std::hex << std::setfill('0') << std::setw(max_xlen/4) << zext(epc, max_xlen) << std::endl; + if (t.has_tval()) + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": tval 0x" << std::hex << std::setfill('0') << std::setw(max_xlen / 4) + << zext(t.get_tval(), max_xlen) << std::endl; + debug_output_log(&s); + } + + if (state.debug_mode) { + if (t.cause() == CAUSE_BREAKPOINT) { + state.pc = DEBUG_ROM_ENTRY; + } else { + state.pc = DEBUG_ROM_TVEC; + } + return; + } + + if (t.cause() == CAUSE_BREAKPOINT && ( + (state.prv == PRV_M && state.dcsr->ebreakm) || + (state.prv == PRV_S && state.dcsr->ebreaks) || + (state.prv == PRV_U && state.dcsr->ebreaku))) { + enter_debug_mode(DCSR_CAUSE_SWBP); + return; + } + + // By default, trap to M-mode, unless delegated to HS-mode or VS-mode + reg_t vsdeleg, hsdeleg; + reg_t bit = t.cause(); + bool curr_virt = state.v; + bool interrupt = (bit & ((reg_t)1 << (max_xlen - 1))) != 0; + if (interrupt) { + vsdeleg = (curr_virt && state.prv <= PRV_S) ? state.hideleg->read() : 0; + hsdeleg = (state.prv <= PRV_S) ? state.mideleg->read() : 0; + bit &= ~((reg_t)1 << (max_xlen - 1)); + } else { + vsdeleg = (curr_virt && state.prv <= PRV_S) ? (state.medeleg->read() & state.hedeleg->read()) : 0; + hsdeleg = (state.prv <= PRV_S) ? state.medeleg->read() : 0; + } + if (state.prv <= PRV_S && bit < max_xlen && ((vsdeleg >> bit) & 1)) { + // Handle the trap in VS-mode + reg_t vector = (state.vstvec->read() & 1) && interrupt ? 4 * bit : 0; + state.pc = (state.vstvec->read() & ~(reg_t)1) + vector; + state.vscause->write((interrupt) ? (t.cause() - 1) : t.cause()); + state.vsepc->write(epc); + state.vstval->write(t.get_tval()); + + reg_t s = state.sstatus->read(); + s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); + s = set_field(s, MSTATUS_SPP, state.prv); + s = set_field(s, MSTATUS_SIE, 0); + state.sstatus->write(s); + set_privilege(PRV_S); + } else if (state.prv <= PRV_S && bit < max_xlen && ((hsdeleg >> bit) & 1)) { + // Handle the trap in HS-mode + set_virt(false); + reg_t vector = (state.stvec->read() & 1) && interrupt ? 
4 * bit : 0; + state.pc = (state.stvec->read() & ~(reg_t)1) + vector; + state.scause->write(t.cause()); + state.sepc->write(epc); + state.stval->write(t.get_tval()); + state.htval->write(t.get_tval2()); + state.htinst->write(t.get_tinst()); + + reg_t s = state.sstatus->read(); + s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); + s = set_field(s, MSTATUS_SPP, state.prv); + s = set_field(s, MSTATUS_SIE, 0); + state.sstatus->write(s); + if (extension_enabled('H')) { + s = state.hstatus->read(); + if (curr_virt) + s = set_field(s, HSTATUS_SPVP, state.prv); + s = set_field(s, HSTATUS_SPV, curr_virt); + s = set_field(s, HSTATUS_GVA, t.has_gva()); + state.hstatus->write(s); + } + set_privilege(PRV_S); + } else { + // Handle the trap in M-mode + set_virt(false); + reg_t vector = (state.mtvec->read() & 1) && interrupt ? 4 * bit : 0; + state.pc = (state.mtvec->read() & ~(reg_t)1) + vector; + state.mepc->write(epc); + state.mcause->write(t.cause()); + state.mtval->write(t.get_tval()); + state.mtval2->write(t.get_tval2()); + state.mtinst->write(t.get_tinst()); + + reg_t s = state.mstatus->read(); + s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE)); + s = set_field(s, MSTATUS_MPP, state.prv); + s = set_field(s, MSTATUS_MIE, 0); + s = set_field(s, MSTATUS_MPV, curr_virt); + s = set_field(s, MSTATUS_GVA, t.has_gva()); + state.mstatus->write(s); + set_privilege(PRV_M); + } +} + +void processor_t::disasm(insn_t insn) +{ + uint64_t bits = insn.bits() & ((1ULL << (8 * insn_length(insn.bits()))) - 1); + if (last_pc != state.pc || last_bits != bits) { + std::stringstream s; // first put everything in a string, later send it to output + +#ifdef RISCV_ENABLE_COMMITLOG + const char* sym = get_symbol(state.pc); + if (sym != nullptr) + { + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": >>>> " << sym << std::endl; + } +#endif + + if (executions != 1) { + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": Executed " << executions << " times" << std::endl; + } + + unsigned max_xlen = isa->get_max_xlen(); + + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << std::hex << ": 0x" << std::setfill('0') << std::setw(max_xlen / 4) + << zext(state.pc, max_xlen) << " (0x" << std::setw(8) << bits << ") " + << disassembler->disassemble(insn) << std::endl; + + debug_output_log(&s); + + last_pc = state.pc; + last_bits = bits; + executions = 1; + } else { + executions++; + } +} + +int processor_t::paddr_bits() +{ + unsigned max_xlen = isa->get_max_xlen(); + assert(xlen == max_xlen); + return max_xlen == 64 ? 50 : 34; +} + +void processor_t::put_csr(int which, reg_t val) +{ + val = zext_xlen(val); + auto search = state.csrmap.find(which); + if (search != state.csrmap.end()) { + search->second->write(val); + return; + } +} + +// Note that get_csr is sometimes called when read side-effects should not +// be actioned. In other words, Spike cannot currently support CSRs with +// side effects on reads. +reg_t processor_t::get_csr(int which, insn_t insn, bool write, bool peek) +{ + auto search = state.csrmap.find(which); + if (search != state.csrmap.end()) { + if (!peek) + search->second->verify_permissions(insn, write); + return search->second->read(); + } + // If we get here, the CSR doesn't exist. Unimplemented CSRs always throw + // illegal-instruction exceptions, not virtual-instruction exceptions. 
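+  // (Reads that must not trigger permission checks use the peek path; the
+  //  one-argument overload declared in processor.h does exactly that:
+  //    reg_t v = proc->get_csr(CSR_MISA); // == get_csr(CSR_MISA, insn_t(0), false, true)
+  //  and therefore skips verify_permissions() above.)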
+  throw trap_illegal_instruction(insn.bits());
+}
+
+reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc)
+{
+  throw trap_illegal_instruction(insn.bits());
+}
+
+insn_func_t processor_t::decode_insn(insn_t insn)
+{
+  // look up opcode in hash table
+  size_t idx = insn.bits() % OPCODE_CACHE_SIZE;
+  insn_desc_t desc = opcode_cache[idx];
+
+  bool rve = extension_enabled('E');
+
+  if (unlikely(insn.bits() != desc.match || !desc.func(xlen, rve))) {
+    // fall back to linear search
+    int cnt = 0;
+    insn_desc_t* p = &instructions[0];
+    while ((insn.bits() & p->mask) != p->match || !p->func(xlen, rve))
+      p++, cnt++;
+    desc = *p;
+
+    if (p->mask != 0 && p > &instructions[0]) {
+      if (p->match != (p - 1)->match && p->match != (p + 1)->match) {
+        // move to front of opcode list to reduce miss penalty
+        while (--p >= &instructions[0])
+          *(p + 1) = *p;
+        instructions[0] = desc;
+      }
+    }
+
+    opcode_cache[idx] = desc;
+    opcode_cache[idx].match = insn.bits();
+  }
+
+  return desc.func(xlen, rve);
+}
+
+void processor_t::register_insn(insn_desc_t desc)
+{
+  instructions.push_back(desc);
+}
+
+void processor_t::build_opcode_map()
+{
+  struct cmp {
+    bool operator()(const insn_desc_t& lhs, const insn_desc_t& rhs) {
+      if (lhs.match == rhs.match)
+        return lhs.mask > rhs.mask;
+      return lhs.match > rhs.match;
+    }
+  };
+  std::sort(instructions.begin(), instructions.end(), cmp());
+
+  for (size_t i = 0; i < OPCODE_CACHE_SIZE; i++)
+    opcode_cache[i] = insn_desc_t::illegal();
+}
+
+void processor_t::register_extension(extension_t* x)
+{
+  for (auto insn : x->get_instructions())
+    register_insn(insn);
+  build_opcode_map();
+
+  for (auto disasm_insn : x->get_disasms())
+    disassembler->add_insn(disasm_insn);
+
+  if (!custom_extensions.insert(std::make_pair(x->name(), x)).second) {
+    fprintf(stderr, "extensions must have unique names (got two named \"%s\"!)\n", x->name());
+    abort();
+  }
+  x->set_processor(this);
+}
+
+void processor_t::register_base_instructions()
+{
+  #define DECLARE_INSN(name, match, mask) \
+    insn_bits_t name##_match = (match), name##_mask = (mask); \
+    bool name##_supported = true;
+
+  #include "encoding.h"
+  #undef DECLARE_INSN
+
+  #define DECLARE_OVERLAP_INSN(name, ext) { name##_supported &= isa->extension_enabled(ext); }
+  #include "overlap_list.h"
+  #undef DECLARE_OVERLAP_INSN
+
+  #define DEFINE_INSN(name) \
+    extern reg_t rv32i_##name(processor_t*, insn_t, reg_t); \
+    extern reg_t rv64i_##name(processor_t*, insn_t, reg_t); \
+    extern reg_t rv32e_##name(processor_t*, insn_t, reg_t); \
+    extern reg_t rv64e_##name(processor_t*, insn_t, reg_t); \
+    register_insn((insn_desc_t) { \
+      name##_supported, \
+      name##_match, \
+      name##_mask, \
+      rv32i_##name, \
+      rv64i_##name, \
+      rv32e_##name, \
+      rv64e_##name});
+  #include "insn_list.h"
+  #undef DEFINE_INSN
+
+  // terminate instruction list with a catch-all
+  register_insn(insn_desc_t::illegal());
+
+  build_opcode_map();
+}
+
+bool processor_t::load(reg_t addr, size_t len, uint8_t* bytes)
+{
+  switch (addr)
+  {
+    case 0:
+      if (len <= 4) {
+        memset(bytes, 0, len);
+        bytes[0] = get_field(state.mip->read(), MIP_MSIP);
+        return true;
+      }
+      break;
+  }
+
+  return false;
+}
+
+bool processor_t::store(reg_t addr, size_t len, const uint8_t* bytes)
+{
+  switch (addr)
+  {
+    case 0:
+      if (len <= 4) {
+        state.mip->write_with_mask(MIP_MSIP, bytes[0] << IRQ_M_SOFT);
+        return true;
+      }
+      break;
+  }
+
+  return false;
+}
+
+void processor_t::trigger_updated(const std::vector<triggers::trigger_t *> &triggers)
+{
+  mmu->flush_tlb();
+  mmu->check_triggers_fetch = false;
+  mmu->check_triggers_load = false;
+  mmu->check_triggers_store = false;
+
+  for (auto trigger : triggers) {
+    if (trigger->execute()) {
+      mmu->check_triggers_fetch = true;
+    }
+    if (trigger->load()) {
+      mmu->check_triggers_load = true;
+    }
+    if (trigger->store()) {
+      mmu->check_triggers_store = true;
+    }
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/processor.h b/vendor/riscv-isa-sim/riscv/processor.h
new file mode 100644
index 00000000..96fdc54c
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/processor.h
@@ -0,0 +1,469 @@
+// See LICENSE for license details.
+#ifndef _RISCV_PROCESSOR_H
+#define _RISCV_PROCESSOR_H
+
+#include "decode.h"
+#include "config.h"
+#include "trap.h"
+#include "abstract_device.h"
+#include <string>
+#include <vector>
+#include <unordered_map>
+#include <map>
+#include <cassert>
+#include "debug_rom_defines.h"
+#include "entropy_source.h"
+#include "csrs.h"
+#include "isa_parser.h"
+#include "triggers.h"
+
+class processor_t;
+class mmu_t;
+typedef reg_t (*insn_func_t)(processor_t*, insn_t, reg_t);
+class simif_t;
+class trap_t;
+class extension_t;
+class disassembler_t;
+
+reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc);
+
+struct insn_desc_t
+{
+  bool supported;
+  insn_bits_t match;
+  insn_bits_t mask;
+  insn_func_t rv32i;
+  insn_func_t rv64i;
+  insn_func_t rv32e;
+  insn_func_t rv64e;
+
+  insn_func_t func(int xlen, bool rve)
+  {
+    if (!supported)
+      return NULL;
+
+    if (rve)
+      return xlen == 64 ? rv64e : rv32e;
+    else
+      return xlen == 64 ? rv64i : rv32i;
+  }
+
+  static insn_desc_t illegal()
+  {
+    return {true, 0, 0, &illegal_instruction, &illegal_instruction, &illegal_instruction, &illegal_instruction};
+  }
+};
+
+// regnum, data
+typedef std::unordered_map<reg_t, freg_t> commit_log_reg_t;
+
+// addr, value, size
+typedef std::vector<std::tuple<reg_t, uint64_t, uint8_t>> commit_log_mem_t;
+
+enum VRM{
+  RNU = 0,
+  RNE,
+  RDN,
+  ROD,
+  INVALID_RM
+};
+
+template<uint64_t N>
+struct type_usew_t;
+
+template<>
+struct type_usew_t<8>
+{
+  using type=uint8_t;
+};
+
+template<>
+struct type_usew_t<16>
+{
+  using type=uint16_t;
+};
+
+template<>
+struct type_usew_t<32>
+{
+  using type=uint32_t;
+};
+
+template<>
+struct type_usew_t<64>
+{
+  using type=uint64_t;
+};
+
+template<uint64_t N>
+struct type_sew_t;
+
+template<>
+struct type_sew_t<8>
+{
+  using type=int8_t;
+};
+
+template<>
+struct type_sew_t<16>
+{
+  using type=int16_t;
+};
+
+template<>
+struct type_sew_t<32>
+{
+  using type=int32_t;
+};
+
+template<>
+struct type_sew_t<64>
+{
+  using type=int64_t;
+};
+
+
+// architectural state of a RISC-V hart
+struct state_t
+{
+  void reset(processor_t* const proc, reg_t max_isa);
+
+  reg_t pc;
+  regfile_t<reg_t, NXPR, true> XPR;
+  regfile_t<freg_t, NFPR, false> FPR;
+
+  // control and status registers
+  std::unordered_map<reg_t, csr_t_p> csrmap;
+  reg_t prv; // TODO: Can this be an enum instead?
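+  // (prv holds the architectural privilege encoding from encoding.h:
+  //  PRV_U = 0, PRV_S = 1, PRV_M = 3; together with the v flag below it
+  //  distinguishes HS-mode (prv = PRV_S, v = false) from VS-mode
+  //  (prv = PRV_S, v = true).)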
+ bool v; + misa_csr_t_p misa; + mstatus_csr_t_p mstatus; + csr_t_p mepc; + csr_t_p mtval; + csr_t_p mtvec; + csr_t_p mcause; + wide_counter_csr_t_p minstret; + wide_counter_csr_t_p mcycle; + mie_csr_t_p mie; + mip_csr_t_p mip; + csr_t_p medeleg; + csr_t_p mideleg; + csr_t_p mcounteren; + csr_t_p scounteren; + csr_t_p sepc; + csr_t_p stval; + csr_t_p stvec; + virtualized_csr_t_p satp; + csr_t_p scause; + + csr_t_p mtval2; + csr_t_p mtinst; + csr_t_p hstatus; + csr_t_p hideleg; + csr_t_p hedeleg; + csr_t_p hcounteren; + csr_t_p htval; + csr_t_p htinst; + csr_t_p hgatp; + sstatus_csr_t_p sstatus; + vsstatus_csr_t_p vsstatus; + csr_t_p vstvec; + csr_t_p vsepc; + csr_t_p vscause; + csr_t_p vstval; + csr_t_p vsatp; + + csr_t_p dpc; + dcsr_csr_t_p dcsr; + csr_t_p tselect; + tdata2_csr_t_p tdata2; + bool debug_mode; + + mseccfg_csr_t_p mseccfg; + + static const int max_pmp = 16; + pmpaddr_csr_t_p pmpaddr[max_pmp]; + + float_csr_t_p fflags; + float_csr_t_p frm; + + csr_t_p menvcfg; + csr_t_p senvcfg; + csr_t_p henvcfg; + + bool serialized; // whether timer CSRs are in a well-defined state + + // When true, execute a single instruction and then enter debug mode. This + // can only be set by executing dret. + enum { + STEP_NONE, + STEP_STEPPING, + STEP_STEPPED + } single_step; + +#ifdef RISCV_ENABLE_COMMITLOG + commit_log_reg_t log_reg_write; + commit_log_mem_t log_mem_read; + commit_log_mem_t log_mem_write; + reg_t last_inst_priv; + int last_inst_xlen; + int last_inst_flen; +#endif +}; + +typedef enum { + OPERATION_EXECUTE, + OPERATION_STORE, + OPERATION_LOAD, +} trigger_operation_t; + +// Count number of contiguous 1 bits starting from the LSB. +static int cto(reg_t val) +{ + int res = 0; + while ((val & 1) == 1) + val >>= 1, res++; + return res; +} + +// this class represents one processor in a RISC-V machine. +class processor_t : public abstract_device_t +{ +public: + processor_t(const isa_parser_t *isa, const char* varch, + simif_t* sim, uint32_t id, bool halt_on_reset, + FILE *log_file, std::ostream& sout_); // because of command line option --log and -s we need both + ~processor_t(); + + const isa_parser_t &get_isa() { return *isa; } + + void set_debug(bool value); + void set_histogram(bool value); +#ifdef RISCV_ENABLE_COMMITLOG + void enable_log_commits(); + bool get_log_commits_enabled() const { return log_commits_enabled; } +#endif + void reset(); + void step(size_t n); // run for n cycles + void put_csr(int which, reg_t val); + uint32_t get_id() const { return id; } + reg_t get_csr(int which, insn_t insn, bool write, bool peek = 0); + reg_t get_csr(int which) { return get_csr(which, insn_t(0), false, true); } + mmu_t* get_mmu() { return mmu; } + state_t* get_state() { return &state; } + unsigned get_xlen() const { return xlen; } + unsigned get_const_xlen() const { + // Any code that assumes a const xlen should use this method to + // document that assumption. If Spike ever changes to allow + // variable xlen, this method should be removed. + return xlen; + } + unsigned get_flen() const { + return extension_enabled('Q') ? 128 : + extension_enabled('D') ? 64 : + extension_enabled('F') ? 32 : 0; + } + extension_t* get_extension(); + extension_t* get_extension(const char* name); + bool any_custom_extensions() const { + return !custom_extensions.empty(); + } + bool extension_enabled(unsigned char ext) const { + if (ext >= 'A' && ext <= 'Z') + return state.misa->extension_enabled(ext); + else + return isa->extension_enabled(ext); + } + // Is this extension enabled? 
and abort if this extension can
+  // possibly be disabled dynamically. Useful for documenting
+  // assumptions about writable misa bits.
+  bool extension_enabled_const(unsigned char ext) const {
+    if (ext >= 'A' && ext <= 'Z')
+      return state.misa->extension_enabled_const(ext);
+    else
+      return isa->extension_enabled(ext); // assume this can't change
+  }
+  void set_impl(uint8_t impl, bool val) { impl_table[impl] = val; }
+  bool supports_impl(uint8_t impl) const {
+    return impl_table[impl];
+  }
+  reg_t pc_alignment_mask() {
+    return ~(reg_t)(extension_enabled('C') ? 0 : 2);
+  }
+  void check_pc_alignment(reg_t pc) {
+    if (unlikely(pc & ~pc_alignment_mask()))
+      throw trap_instruction_address_misaligned(state.v, pc, 0, 0);
+  }
+  reg_t legalize_privilege(reg_t);
+  void set_privilege(reg_t);
+  void set_virt(bool);
+  void update_histogram(reg_t pc);
+  const disassembler_t* get_disassembler() { return disassembler; }
+
+  FILE *get_log_file() { return log_file; }
+
+  void register_insn(insn_desc_t);
+  void register_extension(extension_t*);
+
+  // MMIO slave interface
+  bool load(reg_t addr, size_t len, uint8_t* bytes);
+  bool store(reg_t addr, size_t len, const uint8_t* bytes);
+
+  // When true, display disassembly of each instruction that's executed.
+  bool debug;
+  // Whether to take the slow simulation path.
+  bool slow_path();
+  bool halted() { return state.debug_mode; }
+  enum {
+    HR_NONE,    /* Halt request is inactive. */
+    HR_REGULAR, /* Regular halt request/debug interrupt. */
+    HR_GROUP    /* Halt requested due to halt group. */
+  } halt_request;
+
+  void trigger_updated(const std::vector<triggers::trigger_t *> &triggers);
+
+  void set_pmp_num(reg_t pmp_num);
+  void set_pmp_granularity(reg_t pmp_granularity);
+  void set_mmu_capability(int cap);
+
+  const char* get_symbol(uint64_t addr);
+
+private:
+  const isa_parser_t * const isa;
+
+  simif_t* sim;
+  mmu_t* mmu; // main memory is always accessed via the mmu
+  std::unordered_map<std::string, extension_t*> custom_extensions;
+  disassembler_t* disassembler;
+  state_t state;
+  uint32_t id;
+  unsigned xlen;
+  bool histogram_enabled;
+  bool log_commits_enabled;
+  FILE *log_file;
+  std::ostream sout_; // needed for socket command interface -s, also used for -d and -l, but not for --log
+  bool halt_on_reset;
+  std::vector<bool> impl_table;
+
+  std::vector<insn_desc_t> instructions;
+  std::map<reg_t, uint64_t> pc_histogram;
+
+  static const size_t OPCODE_CACHE_SIZE = 8191;
+  insn_desc_t opcode_cache[OPCODE_CACHE_SIZE];
+
+  void take_pending_interrupt() { take_interrupt(state.mip->read() & state.mie->read()); }
+  void take_interrupt(reg_t mask); // take first enabled interrupt in mask
+  void take_trap(trap_t& t, reg_t epc); // take an exception
+  void disasm(insn_t insn); // disassemble and print an instruction
+  int paddr_bits();
+
+  void enter_debug_mode(uint8_t cause);
+
+  void debug_output_log(std::stringstream *s); // either output to interactive user or write to log file
+
+  friend class mmu_t;
+  friend class clint_t;
+  friend class extension_t;
+
+  void parse_varch_string(const char*);
+  void parse_priv_string(const char*);
+  void build_opcode_map();
+  void register_base_instructions();
+  insn_func_t decode_insn(insn_t insn);
+
+  // Track repeated executions for processor_t::disasm()
+  uint64_t last_pc, last_bits, executions;
+public:
+  entropy_source es; // Crypto ISE Entropy source.
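+  // (For the PMP fields just below: pmpaddr CSRs drop the low two address
+  //  bits, so a grain of 2^lg_pmp_granularity bytes leaves the low
+  //  (lg_pmp_granularity - PMP_SHIFT) pmpaddr bits unused. Example: a 4 KiB
+  //  grain gives lg_pmp_granularity = 12, and pmp_tor_mask() below evaluates
+  //  to ~reg_t(0x3ff).)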
+
+  reg_t n_pmp;
+  reg_t lg_pmp_granularity;
+  reg_t pmp_tor_mask() { return -(reg_t(1) << (lg_pmp_granularity - PMP_SHIFT)); }
+
+  class vectorUnit_t {
+  public:
+    processor_t* p;
+    void *reg_file;
+    char reg_referenced[NVPR];
+    int setvl_count;
+    reg_t vlmax;
+    reg_t vlenb;
+    csr_t_p vxsat;
+    vector_csr_t_p vxrm, vstart, vl, vtype;
+    reg_t vma, vta;
+    reg_t vsew;
+    float vflmul;
+    reg_t ELEN, VLEN;
+    bool vill;
+    bool vstart_alu;
+
+    // vector element accessor for the various SEWs
+    template<typename T>
+    T& elt(reg_t vReg, reg_t n, bool is_write = false) {
+      assert(vsew != 0);
+      assert((VLEN >> 3)/sizeof(T) > 0);
+      reg_t elts_per_reg = (VLEN >> 3) / (sizeof(T));
+      vReg += n / elts_per_reg;
+      n = n % elts_per_reg;
+#ifdef WORDS_BIGENDIAN
+      // "V" spec 0.7.1 requires lower indices to map to lower significant
+      // bits when changing SEW, thus we need to index from the end on BE.
+      n ^= elts_per_reg - 1;
+#endif
+      reg_referenced[vReg] = 1;
+
+#ifdef RISCV_ENABLE_COMMITLOG
+      if (is_write)
+        p->get_state()->log_reg_write[((vReg) << 4) | 2] = {0, 0};
+#endif
+
+      T *regStart = (T*)((char*)reg_file + vReg * (VLEN >> 3));
+      return regStart[n];
+    }
+  public:
+
+    void reset();
+
+    vectorUnit_t():
+      p(0),
+      reg_file(0),
+      reg_referenced{0},
+      setvl_count(0),
+      vlmax(0),
+      vlenb(0),
+      vxsat(0),
+      vxrm(0),
+      vstart(0),
+      vl(0),
+      vtype(0),
+      vma(0),
+      vta(0),
+      vsew(0),
+      vflmul(0),
+      ELEN(0),
+      VLEN(0),
+      vill(false),
+      vstart_alu(false) {
+    }
+
+    ~vectorUnit_t(){
+      free(reg_file);
+      reg_file = 0;
+    }
+
+    reg_t set_vl(int rd, int rs1, reg_t reqVL, reg_t newType);
+
+    reg_t get_vlen() { return VLEN; }
+    reg_t get_elen() { return ELEN; }
+    reg_t get_slen() { return VLEN; }
+
+    VRM get_vround_mode() {
+      return (VRM)(vxrm->read());
+    }
+  };
+
+  vectorUnit_t VU;
+  triggers::module_t TM;
+};
+
+#endif
diff --git a/vendor/riscv-isa-sim/riscv/remote_bitbang.cc b/vendor/riscv-isa-sim/riscv/remote_bitbang.cc
new file mode 100644
index 00000000..8453e85a
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/remote_bitbang.cc
@@ -0,0 +1,187 @@
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <unistd.h>
+
+#ifndef AF_INET
+#include <sys/socket.h>
+#endif
+#ifndef INADDR_ANY
+#include <netinet/in.h>
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <arpa/inet.h>
+
+#include "remote_bitbang.h"
+
+#if 1
+#  define D(x) x
+#else
+#  define D(x)
+#endif
+
+/////////// remote_bitbang_t
+
+remote_bitbang_t::remote_bitbang_t(uint16_t port, jtag_dtm_t *tap) :
+  tap(tap),
+  socket_fd(0),
+  client_fd(0),
+  recv_start(0),
+  recv_end(0)
+{
+  socket_fd = socket(AF_INET, SOCK_STREAM, 0);
+  if (socket_fd == -1) {
+    fprintf(stderr, "remote_bitbang failed to make socket: %s (%d)\n",
+            strerror(errno), errno);
+    abort();
+  }
+
+  fcntl(socket_fd, F_SETFL, O_NONBLOCK);
+  int reuseaddr = 1;
+  if (setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr,
+                 sizeof(int)) == -1) {
+    fprintf(stderr, "remote_bitbang failed setsockopt: %s (%d)\n",
+            strerror(errno), errno);
+    abort();
+  }
+
+  struct sockaddr_in addr;
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+  addr.sin_addr.s_addr = INADDR_ANY;
+  addr.sin_port = htons(port);
+
+  if (bind(socket_fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) {
+    fprintf(stderr, "remote_bitbang failed to bind socket: %s (%d)\n",
+            strerror(errno), errno);
+    abort();
+  }
+
+  if (listen(socket_fd, 1) == -1) {
+    fprintf(stderr, "remote_bitbang failed to listen on socket: %s (%d)\n",
+            strerror(errno), errno);
+    abort();
+  }
+
+  socklen_t addrlen = sizeof(addr);
+  if (getsockname(socket_fd, (struct sockaddr *) &addr, &addrlen) == -1) {
+    fprintf(stderr, "remote_bitbang
 getsockname failed: %s (%d)\n",
+            strerror(errno), errno);
+    abort();
+  }
+
+  printf("Listening for remote bitbang connection on port %d.\n",
+         ntohs(addr.sin_port));
+  fflush(stdout);
+}
+
+void remote_bitbang_t::accept()
+{
+  client_fd = ::accept(socket_fd, NULL, NULL);
+  if (client_fd == -1) {
+    if (errno == EAGAIN) {
+      // No client waiting to connect right now.
+    } else {
+      fprintf(stderr, "failed to accept on socket: %s (%d)\n", strerror(errno),
+              errno);
+      abort();
+    }
+  } else {
+    fcntl(client_fd, F_SETFL, O_NONBLOCK);
+  }
+}
+
+void remote_bitbang_t::tick()
+{
+  if (client_fd > 0) {
+    execute_commands();
+  } else {
+    this->accept();
+  }
+}
+
+void remote_bitbang_t::execute_commands()
+{
+  static char send_buf[buf_size];
+  unsigned total_processed = 0;
+  bool quit = false;
+  bool in_rti = tap->state() == RUN_TEST_IDLE;
+  bool entered_rti = false;
+  while (1) {
+    if (recv_start < recv_end) {
+      unsigned send_offset = 0;
+      while (recv_start < recv_end) {
+        uint8_t command = recv_buf[recv_start];
+
+        switch (command) {
+          case 'B': /* fprintf(stderr, "*BLINK*\n"); */ break;
+          case 'b': /* fprintf(stderr, "_______\n"); */ break;
+          case 'r': tap->reset(); break;
+          case '0': tap->set_pins(0, 0, 0); break;
+          case '1': tap->set_pins(0, 0, 1); break;
+          case '2': tap->set_pins(0, 1, 0); break;
+          case '3': tap->set_pins(0, 1, 1); break;
+          case '4': tap->set_pins(1, 0, 0); break;
+          case '5': tap->set_pins(1, 0, 1); break;
+          case '6': tap->set_pins(1, 1, 0); break;
+          case '7': tap->set_pins(1, 1, 1); break;
+          case 'R': send_buf[send_offset++] = tap->tdo() ? '1' : '0'; break;
+          case 'Q': quit = true; break;
+          default:
+            fprintf(stderr, "remote_bitbang got unsupported command '%c'\n",
+                    command);
+        }
+        recv_start++;
+        total_processed++;
+        if (!in_rti && tap->state() == RUN_TEST_IDLE) {
+          entered_rti = true;
+          break;
+        }
+        in_rti = false;
+      }
+      unsigned sent = 0;
+      while (sent < send_offset) {
+        ssize_t bytes = write(client_fd, send_buf + sent, send_offset - sent);
+        if (bytes == -1) {
+          fprintf(stderr, "failed to write to socket: %s (%d)\n", strerror(errno), errno);
+          abort();
+        }
+        sent += bytes;
+      }
+    }
+
+    if (total_processed > buf_size || quit || entered_rti) {
+      // Don't go forever, because that could starve the main simulation.
+      break;
+    }
+
+    recv_start = 0;
+    recv_end = read(client_fd, recv_buf, buf_size);
+
+    if (recv_end == -1) {
+      if (errno == EAGAIN) {
+        break;
+      } else {
+        fprintf(stderr, "remote_bitbang failed to read on socket: %s (%d)\n",
+                strerror(errno), errno);
+        abort();
+      }
+    }
+
+    if (quit) {
+      fprintf(stderr, "Remote Bitbang received 'Q'\n");
+    }
+
+    if (recv_end == 0 || quit) {
+      // The remote disconnected.
+      fprintf(stderr, "Received nothing. Quitting.\n");
+      close(client_fd);
+      client_fd = 0;
+      break;
+    }
+  }
+}
diff --git a/vendor/riscv-isa-sim/riscv/remote_bitbang.h b/vendor/riscv-isa-sim/riscv/remote_bitbang.h
new file mode 100644
index 00000000..1db4d550
--- /dev/null
+++ b/vendor/riscv-isa-sim/riscv/remote_bitbang.h
@@ -0,0 +1,34 @@
+#ifndef REMOTE_BITBANG_H
+#define REMOTE_BITBANG_H
+
+#include <stdint.h>
+
+#include "jtag_dtm.h"
+
+class remote_bitbang_t
+{
+public:
+  // Create a new server, listening for connections from localhost on the given
+  // port.
+  remote_bitbang_t(uint16_t port, jtag_dtm_t *tap);
+
+  // Do a bit of work.
+  void tick();
+
+private:
+  jtag_dtm_t *tap;
+
+  int socket_fd;
+  int client_fd;
+
+  static const ssize_t buf_size = 64 * 1024;
+  char recv_buf[buf_size];
+  ssize_t recv_start, recv_end;
+
+  // Check for a client connecting, and accept if there is one.
+ void accept(); + // Execute any commands the client has for us. + void execute_commands(); +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/riscv.ac b/vendor/riscv-isa-sim/riscv/riscv.ac new file mode 100644 index 00000000..9d14335d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/riscv.ac @@ -0,0 +1,65 @@ +AC_LANG_CPLUSPLUS + +AX_BOOST_BASE([1.53]) +AX_BOOST_ASIO +AX_BOOST_REGEX + +AC_CHECK_LIB([boost_system], [main], [], []) + +AC_CHECK_LIB([boost_regex], [main], [], []) + +AC_ARG_WITH(isa, + [AS_HELP_STRING([--with-isa=RV64IMAFDC], + [Sets the default RISC-V ISA])], + AC_DEFINE_UNQUOTED([DEFAULT_ISA], "$withval", [Default value for --isa switch]), + AC_DEFINE_UNQUOTED([DEFAULT_ISA], "RV64IMAFDC", [Default value for --isa switch])) + +AC_ARG_WITH(priv, + [AS_HELP_STRING([--with-priv=MSU], + [Sets the default RISC-V privilege modes supported])], + AC_DEFINE_UNQUOTED([DEFAULT_PRIV], "$withval", [Default value for --priv switch]), + AC_DEFINE_UNQUOTED([DEFAULT_PRIV], "MSU", [Default value for --priv switch])) + +AC_ARG_WITH(varch, + [AS_HELP_STRING([--with-varch=vlen:128,elen:64], + [Sets the default vector config])], + AC_DEFINE_UNQUOTED([DEFAULT_VARCH], "$withval", [Default value for --varch switch]), + AC_DEFINE_UNQUOTED([DEFAULT_VARCH], ["vlen:128,elen:64"], [Default value for --varch switch])) + +AC_ARG_WITH(target, + [AS_HELP_STRING([--with-target=riscv64-unknown-elf], + [Sets the default target config])], + AC_DEFINE_UNQUOTED([TARGET_ARCH], "$withval", [Default value for --target switch]), + AC_DEFINE_UNQUOTED([TARGET_ARCH], ["riscv64-unknown-elf"], [Default value for --target switch])) + +AC_SEARCH_LIBS([dlopen], [dl dld], [ + AC_DEFINE([HAVE_DLOPEN], [], [Dynamic library loading is supported]) + AC_SUBST([HAVE_DLOPEN], [yes]) +]) + +AC_CHECK_LIB(pthread, pthread_create, [], [AC_MSG_ERROR([libpthread is required])]) + +AC_ARG_ENABLE([commitlog], AS_HELP_STRING([--enable-commitlog], [Enable commit log generation])) +AS_IF([test "x$enable_commitlog" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_COMMITLOG],,[Enable commit log generation]) +]) + +AC_ARG_ENABLE([histogram], AS_HELP_STRING([--enable-histogram], [Enable PC histogram generation])) +AS_IF([test "x$enable_histogram" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_HISTOGRAM],,[Enable PC histogram generation]) +]) + +AC_ARG_ENABLE([dirty], AS_HELP_STRING([--enable-dirty], [Enable hardware management of PTE accessed and dirty bits])) +AS_IF([test "x$enable_dirty" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_DIRTY],,[Enable hardware management of PTE accessed and dirty bits]) +]) + +AC_ARG_ENABLE([misaligned], AS_HELP_STRING([--enable-misaligned], [Enable hardware support for misaligned loads and stores])) +AS_IF([test "x$enable_misaligned" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_MISALIGNED],,[Enable hardware support for misaligned loads and stores]) +]) + +AC_ARG_ENABLE([dual-endian], AS_HELP_STRING([--enable-dual-endian], [Enable support for running target in either endianness])) +AS_IF([test "x$enable_dual_endian" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_DUAL_ENDIAN],,[Enable support for running target in either endianness]) +]) diff --git a/vendor/riscv-isa-sim/riscv/riscv.mk.in b/vendor/riscv-isa-sim/riscv/riscv.mk.in new file mode 100644 index 00000000..0c6b977f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/riscv.mk.in @@ -0,0 +1,1301 @@ +get_insn_list = $(shell grep ^DECLARE_INSN $(1) | sed 's/DECLARE_INSN(\(.*\),.*,.*)/\1/') +get_opcode = $(shell grep ^DECLARE_INSN.*\\\<$(2)\\\> $(1) | sed 's/DECLARE_INSN(.*,\(.*\),.*)/\1/') + 
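+# For example (hypothetical invocation, assuming the usual $(src_dir)
+# variable from Makefile.in), the instruction names declared in encoding.h
+# can be listed with:
+#   $(call get_insn_list,$(src_dir)/riscv/encoding.h)
+# and the match macro for a single instruction extracted with:
+#   $(call get_opcode,$(src_dir)/riscv/encoding.h,add)   # -> MATCH_ADD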
+riscv_subproject_deps = \ + fdt \ + softfloat \ + +riscv_install_prog_srcs = \ + +riscv_hdrs = \ + abstract_device.h \ + common.h \ + decode.h \ + devices.h \ + dts.h \ + mmu.h \ + cfg.h \ + processor.h \ + sim.h \ + simif.h \ + trap.h \ + encoding.h \ + cachesim.h \ + memtracer.h \ + mmio_plugin.h \ + tracer.h \ + extension.h \ + rocc.h \ + insn_template.h \ + debug_module.h \ + debug_rom_defines.h \ + remote_bitbang.h \ + jtag_dtm.h \ + csrs.h \ + triggers.h \ + +riscv_install_hdrs = mmio_plugin.h + +riscv_precompiled_hdrs = \ + insn_template.h \ + +riscv_srcs = \ + isa_parser.cc \ + processor.cc \ + execute.cc \ + dts.cc \ + sim.cc \ + interactive.cc \ + cachesim.cc \ + mmu.cc \ + extension.cc \ + extensions.cc \ + rocc.cc \ + devices.cc \ + rom.cc \ + clint.cc \ + debug_module.cc \ + remote_bitbang.cc \ + jtag_dtm.cc \ + csrs.cc \ + triggers.cc \ + $(riscv_gen_srcs) \ + +riscv_test_srcs = + +riscv_gen_hdrs = \ + insn_list.h \ + + +riscv_insn_ext_i = \ + add \ + addi \ + addiw \ + addw \ + and \ + andi \ + auipc \ + beq \ + bge \ + bgeu \ + blt \ + bltu \ + bne \ + jal \ + jalr \ + lb \ + lbu \ + ld \ + lh \ + lhu \ + lui \ + lw \ + lwu \ + or \ + ori \ + sb \ + sd \ + sh \ + sll \ + slli \ + slliw \ + sllw \ + slt \ + slti \ + sltiu \ + sltu \ + sra \ + srai \ + sraiw \ + sraw \ + srl \ + srli \ + srliw \ + srlw \ + sub \ + subw \ + sw \ + xor \ + xori \ + fence \ + fence_i \ + +riscv_insn_ext_a = \ + amoadd_d \ + amoadd_w \ + amoand_d \ + amoand_w \ + amomax_d \ + amomaxu_d \ + amomaxu_w \ + amomax_w \ + amomin_d \ + amominu_d \ + amominu_w \ + amomin_w \ + amoor_d \ + amoor_w \ + amoswap_d \ + amoswap_w \ + amoxor_d \ + amoxor_w \ + lr_d \ + lr_w \ + sc_d \ + sc_w \ + +riscv_insn_ext_c = \ + c_add \ + c_addi \ + c_addi4spn \ + c_addw \ + c_and \ + c_andi \ + c_beqz \ + c_bnez \ + c_ebreak \ + c_fld \ + c_fldsp \ + c_flw \ + c_flwsp \ + c_fsd \ + c_fsdsp \ + c_fsw \ + c_fswsp \ + c_j \ + c_jal \ + c_jalr \ + c_jr \ + c_li \ + c_lui \ + c_lw \ + c_lwsp \ + c_mv \ + c_or \ + c_slli \ + c_srai \ + c_srli \ + c_sub \ + c_subw \ + c_sw \ + c_swsp \ + c_xor \ + +riscv_insn_ext_m = \ + div \ + divu \ + divuw \ + divw \ + mul \ + mulh \ + mulhsu \ + mulhu \ + mulw \ + rem \ + remu \ + remuw \ + remw \ + +riscv_insn_ext_f = \ + fadd_s \ + fclass_s \ + fcvt_l_s \ + fcvt_lu_s \ + fcvt_s_l \ + fcvt_s_lu \ + fcvt_s_w \ + fcvt_s_wu \ + fcvt_w_s \ + fcvt_wu_s \ + fdiv_s \ + feq_s \ + fle_s \ + flt_s \ + flw \ + fmadd_s \ + fmax_s \ + fmin_s \ + fmsub_s \ + fmul_s \ + fmv_w_x \ + fmv_x_w \ + fnmadd_s \ + fnmsub_s \ + fsgnj_s \ + fsgnjn_s \ + fsgnjx_s \ + fsqrt_s \ + fsub_s \ + fsw \ + +riscv_insn_ext_d = \ + fadd_d \ + fclass_d \ + fcvt_d_l \ + fcvt_d_lu \ + fcvt_d_q \ + fcvt_d_s \ + fcvt_d_w \ + fcvt_d_wu \ + fcvt_l_d \ + fcvt_lu_d \ + fcvt_s_d \ + fcvt_w_d \ + fcvt_wu_d \ + fdiv_d \ + feq_d \ + fld \ + fle_d \ + flt_d \ + fmadd_d \ + fmax_d \ + fmin_d \ + fmsub_d \ + fmul_d \ + fmv_d_x \ + fmv_x_d \ + fnmadd_d \ + fnmsub_d \ + fsd \ + fsgnj_d \ + fsgnjn_d \ + fsgnjx_d \ + fsqrt_d \ + fsub_d \ + +riscv_insn_ext_zfh = \ + fadd_h \ + fclass_h \ + fcvt_l_h \ + fcvt_lu_h \ + fcvt_d_h \ + fcvt_h_d \ + fcvt_h_l \ + fcvt_h_lu \ + fcvt_h_q \ + fcvt_h_s \ + fcvt_h_w \ + fcvt_h_wu \ + fcvt_q_h \ + fcvt_s_h \ + fcvt_w_h \ + fcvt_wu_h \ + fdiv_h \ + feq_h \ + fle_h \ + flh \ + flt_h \ + fmadd_h \ + fmax_h \ + fmin_h \ + fmsub_h \ + fmul_h \ + fmv_h_x \ + fmv_x_h \ + fnmadd_h \ + fnmsub_h \ + fsgnj_h \ + fsgnjn_h \ + fsgnjx_h \ + fsh \ + fsqrt_h \ + fsub_h \ + +riscv_insn_ext_q = \ + fadd_q \ + fclass_q \ + 
fcvt_l_q \ + fcvt_lu_q \ + fcvt_q_d \ + fcvt_q_l \ + fcvt_q_lu \ + fcvt_q_s \ + fcvt_q_w \ + fcvt_q_wu \ + fcvt_s_q \ + fcvt_w_q \ + fcvt_wu_q \ + fdiv_q \ + feq_q \ + fle_q \ + flq \ + flt_q \ + fmadd_q \ + fmax_q \ + fmin_q \ + fmsub_q \ + fmul_q \ + fnmadd_q \ + fnmsub_q \ + fsgnj_q \ + fsgnjn_q \ + fsgnjx_q \ + fsq \ + fsqrt_q \ + fsub_q \ + +riscv_insn_ext_b = \ + add_uw \ + andn \ + bdecompress \ + bdecompressw \ + bcompress \ + bcompressw \ + bfp \ + bfpw \ + bmatflip \ + bmator \ + bmatxor \ + sh1add \ + sh1add_uw \ + sh2add \ + sh2add_uw \ + sh3add \ + sh3add_uw \ + clmul \ + clmulh \ + clmulr \ + clz \ + clzw \ + cmix \ + cmov \ + crc32_b \ + crc32c_b \ + crc32c_d \ + crc32c_h \ + crc32c_w \ + crc32_d \ + crc32_h \ + crc32_w \ + ctz \ + ctzw \ + fsl \ + fslw \ + fsr \ + fsri \ + fsriw \ + fsrw \ + gorc \ + gorci \ + gorciw \ + gorcw \ + grev \ + grevi \ + greviw \ + grevw \ + max \ + maxu \ + min \ + minu \ + orn \ + pack \ + packh \ + packu \ + packuw \ + packw \ + cpop \ + cpopw \ + rol \ + rolw \ + ror \ + rori \ + roriw \ + rorw \ + bclr \ + bclri \ + bext \ + bexti \ + binv \ + binvi \ + bset \ + bseti \ + sext_b \ + sext_h \ + shfl \ + shfli \ + shflw \ + slli_uw \ + slo \ + sloi \ + sloiw \ + slow \ + sro \ + sroi \ + sroiw \ + srow \ + unshfl \ + unshfli \ + unshflw \ + xnor \ + xperm4 \ + xperm8 \ + xperm16 \ + xperm32 \ + +# Scalar Crypto ISE +riscv_insn_ext_k = \ + aes32dsi \ + aes32dsmi \ + aes32esi \ + aes32esmi \ + aes64ds \ + aes64dsm \ + aes64es \ + aes64esm \ + aes64ks1i \ + aes64ks2 \ + aes64im \ + sha256sig0 \ + sha256sig1 \ + sha256sum0 \ + sha256sum1 \ + sha512sig0 \ + sha512sig0h \ + sha512sig0l \ + sha512sig1 \ + sha512sig1h \ + sha512sig1l \ + sha512sum0 \ + sha512sum0r \ + sha512sum1 \ + sha512sum1r \ + sm3p0 \ + sm3p1 \ + sm4ed \ + sm4ks + +riscv_insn_ext_v_alu_int = \ + vaadd_vv \ + vaaddu_vv \ + vaadd_vx \ + vaaddu_vx \ + vadc_vim \ + vadc_vvm \ + vadc_vxm \ + vadd_vi \ + vadd_vv \ + vadd_vx \ + vand_vi \ + vand_vv \ + vand_vx \ + vasub_vv \ + vasubu_vv \ + vasub_vx \ + vasubu_vx \ + vcompress_vm \ + vcpop_m \ + vdiv_vv \ + vdiv_vx \ + vdivu_vv \ + vdivu_vx \ + vid_v \ + viota_m \ + vmacc_vv \ + vmacc_vx \ + vmadc_vv \ + vmadc_vx \ + vmadc_vi \ + vmadc_vim \ + vmadc_vvm \ + vmadc_vxm \ + vmadd_vv \ + vmadd_vx \ + vmand_mm \ + vmandn_mm \ + vmax_vv \ + vmax_vx \ + vmaxu_vv \ + vmaxu_vx \ + vmerge_vim \ + vmerge_vvm \ + vmerge_vxm \ + vfirst_m \ + vmin_vv \ + vmin_vx \ + vminu_vv \ + vminu_vx \ + vmnand_mm \ + vmnor_mm \ + vmor_mm \ + vmorn_mm \ + vmsbc_vv \ + vmsbc_vx \ + vmsbc_vvm \ + vmsbc_vxm \ + vmsbf_m \ + vmseq_vi \ + vmseq_vv \ + vmseq_vx \ + vmsgt_vi \ + vmsgt_vx \ + vmsgtu_vi \ + vmsgtu_vx \ + vmsif_m \ + vmsle_vi \ + vmsle_vv \ + vmsle_vx \ + vmsleu_vi \ + vmsleu_vv \ + vmsleu_vx \ + vmslt_vv \ + vmslt_vx \ + vmsltu_vv \ + vmsltu_vx \ + vmsne_vi \ + vmsne_vv \ + vmsne_vx \ + vmsof_m \ + vmul_vv \ + vmul_vx \ + vmulh_vv \ + vmulh_vx \ + vmulhsu_vv \ + vmulhsu_vx \ + vmulhu_vv \ + vmulhu_vx \ + vmv_s_x \ + vmv_v_i \ + vmv_v_v \ + vmv_v_x \ + vmv_x_s \ + vmv1r_v \ + vmv2r_v \ + vmv4r_v \ + vmv8r_v \ + vmxnor_mm \ + vmxor_mm \ + vnclip_wi \ + vnclip_wv \ + vnclip_wx \ + vnclipu_wi \ + vnclipu_wv \ + vnclipu_wx \ + vnmsac_vv \ + vnmsac_vx \ + vnmsub_vv \ + vnmsub_vx \ + vnsra_wi \ + vnsra_wv \ + vnsra_wx \ + vnsrl_wi \ + vnsrl_wv \ + vnsrl_wx \ + vor_vi \ + vor_vv \ + vor_vx \ + vredand_vs \ + vredmax_vs \ + vredmaxu_vs \ + vredmin_vs \ + vredminu_vs \ + vredor_vs \ + vredsum_vs \ + vredxor_vs \ + vrem_vv \ + vrem_vx \ + vremu_vv \ + vremu_vx \ + 
vrgather_vi \ + vrgather_vv \ + vrgather_vx \ + vrgatherei16_vv \ + vrsub_vi \ + vrsub_vx \ + vsadd_vi \ + vsadd_vv \ + vsadd_vx \ + vsaddu_vi \ + vsaddu_vv \ + vsaddu_vx \ + vsbc_vvm \ + vsbc_vxm \ + vsext_vf2 \ + vsext_vf4 \ + vsext_vf8 \ + vslide1down_vx \ + vslide1up_vx \ + vslidedown_vi \ + vslidedown_vx \ + vslideup_vi \ + vslideup_vx \ + vsll_vi \ + vsll_vv \ + vsll_vx \ + vsmul_vv \ + vsmul_vx \ + vsra_vi \ + vsra_vv \ + vsra_vx \ + vsrl_vi \ + vsrl_vv \ + vsrl_vx \ + vssra_vi \ + vssra_vv \ + vssra_vx \ + vssrl_vi \ + vssrl_vv \ + vssrl_vx \ + vssub_vv \ + vssub_vx \ + vssubu_vv \ + vssubu_vx \ + vsub_vv \ + vsub_vx \ + vwadd_vv \ + vwadd_vx \ + vwadd_wv \ + vwadd_wx \ + vwaddu_vv \ + vwaddu_vx \ + vwaddu_wv \ + vwaddu_wx \ + vwmacc_vv \ + vwmacc_vx \ + vwmaccsu_vv \ + vwmaccsu_vx \ + vwmaccu_vv \ + vwmaccu_vx \ + vwmaccus_vx \ + vwmul_vv \ + vwmul_vx \ + vwmulsu_vv \ + vwmulsu_vx \ + vwmulu_vv \ + vwmulu_vx \ + vwredsum_vs \ + vwredsumu_vs \ + vwsub_vv \ + vwsub_vx \ + vwsub_wv \ + vwsub_wx \ + vwsubu_vv \ + vwsubu_vx \ + vwsubu_wv \ + vwsubu_wx \ + vxor_vi \ + vxor_vv \ + vxor_vx \ + vzext_vf2 \ + vzext_vf4 \ + vzext_vf8 \ + +riscv_insn_ext_v_alu_fp = \ + vfadd_vf \ + vfadd_vv \ + vfclass_v \ + vfcvt_f_x_v \ + vfcvt_f_xu_v \ + vfcvt_rtz_x_f_v \ + vfcvt_rtz_xu_f_v \ + vfcvt_x_f_v \ + vfcvt_xu_f_v \ + vfdiv_vf \ + vfdiv_vv \ + vfmacc_vf \ + vfmacc_vv \ + vfmadd_vf \ + vfmadd_vv \ + vfmax_vf \ + vfmax_vv \ + vfmerge_vfm \ + vfmin_vf \ + vfmin_vv \ + vfmsac_vf \ + vfmsac_vv \ + vfmsub_vf \ + vfmsub_vv \ + vfmul_vf \ + vfmul_vv \ + vfmv_f_s \ + vfmv_s_f \ + vfmv_v_f \ + vfncvt_f_f_w \ + vfncvt_f_x_w \ + vfncvt_f_xu_w \ + vfncvt_rod_f_f_w \ + vfncvt_rtz_x_f_w \ + vfncvt_rtz_xu_f_w \ + vfncvt_x_f_w \ + vfncvt_xu_f_w \ + vfnmacc_vf \ + vfnmacc_vv \ + vfnmadd_vf \ + vfnmadd_vv \ + vfnmsac_vf \ + vfnmsac_vv \ + vfnmsub_vf \ + vfnmsub_vv \ + vfrdiv_vf \ + vfredmax_vs \ + vfredmin_vs \ + vfredosum_vs \ + vfredusum_vs \ + vfrec7_v \ + vfrsub_vf \ + vfrsqrt7_v \ + vfsgnj_vf \ + vfsgnj_vv \ + vfsgnjn_vf \ + vfsgnjn_vv \ + vfsgnjx_vf \ + vfsgnjx_vv \ + vfsqrt_v \ + vfslide1down_vf \ + vfslide1up_vf \ + vfsub_vf \ + vfsub_vv \ + vfwadd_vf \ + vfwadd_vv \ + vfwadd_wf \ + vfwadd_wv \ + vfwcvt_f_f_v \ + vfwcvt_f_x_v \ + vfwcvt_f_xu_v \ + vfwcvt_rtz_x_f_v \ + vfwcvt_rtz_xu_f_v \ + vfwcvt_x_f_v \ + vfwcvt_xu_f_v \ + vfwmacc_vf \ + vfwmacc_vv \ + vfwmsac_vf \ + vfwmsac_vv \ + vfwmul_vf \ + vfwmul_vv \ + vfwnmacc_vf \ + vfwnmacc_vv \ + vfwnmsac_vf \ + vfwnmsac_vv \ + vfwredosum_vs \ + vfwredusum_vs \ + vfwsub_vf \ + vfwsub_vv \ + vfwsub_wf \ + vfwsub_wv \ + vmfeq_vf \ + vmfeq_vv \ + vmfge_vf \ + vmfgt_vf \ + vmfle_vf \ + vmfle_vv \ + vmflt_vf \ + vmflt_vv \ + vmfne_vf \ + vmfne_vv \ + +riscv_insn_ext_v_amo = \ + vamoswapei8_v \ + vamoaddei8_v \ + vamoandei8_v \ + vamomaxei8_v \ + vamomaxuei8_v \ + vamominei8_v \ + vamominuei8_v \ + vamoorei8_v \ + vamoxorei8_v \ + vamoswapei16_v \ + vamoaddei16_v \ + vamoandei16_v \ + vamomaxei16_v \ + vamomaxuei16_v \ + vamominei16_v \ + vamominuei16_v \ + vamoorei16_v \ + vamoxorei16_v \ + vamoswapei32_v \ + vamoaddei32_v \ + vamoandei32_v \ + vamomaxei32_v \ + vamomaxuei32_v \ + vamominei32_v \ + vamominuei32_v \ + vamoorei32_v \ + vamoxorei32_v \ + vamoswapei64_v \ + vamoaddei64_v \ + vamoandei64_v \ + vamomaxei64_v \ + vamomaxuei64_v \ + vamominei64_v \ + vamominuei64_v \ + vamoorei64_v \ + vamoxorei64_v \ + +riscv_insn_ext_v_ldst = \ + vlm_v \ + vle8_v \ + vle16_v \ + vle32_v \ + vle64_v \ + vloxei8_v \ + vloxei16_v \ + vloxei32_v \ + vloxei64_v \ + vlse8_v \ + 
vlse16_v \ + vlse32_v \ + vlse64_v \ + vluxei8_v \ + vluxei16_v \ + vluxei32_v \ + vluxei64_v \ + vle8ff_v \ + vle16ff_v \ + vle32ff_v \ + vle64ff_v \ + vl1re8_v \ + vl2re8_v \ + vl4re8_v \ + vl8re8_v \ + vl1re16_v \ + vl2re16_v \ + vl4re16_v \ + vl8re16_v \ + vl1re32_v \ + vl2re32_v \ + vl4re32_v \ + vl8re32_v \ + vl1re64_v \ + vl2re64_v \ + vl4re64_v \ + vl8re64_v \ + vsm_v \ + vse8_v \ + vse16_v \ + vse32_v \ + vse64_v \ + vsse8_v \ + vsoxei8_v \ + vsoxei16_v \ + vsoxei32_v \ + vsoxei64_v \ + vsse16_v \ + vsse32_v \ + vsse64_v \ + vsuxei8_v \ + vsuxei16_v \ + vsuxei32_v \ + vsuxei64_v \ + vs1r_v \ + vs2r_v \ + vs4r_v \ + vs8r_v \ + +riscv_insn_ext_v_ctrl = \ + vsetivli \ + vsetvli \ + vsetvl \ + +riscv_insn_ext_v = \ + $(riscv_insn_ext_v_alu_fp) \ + $(riscv_insn_ext_v_alu_int) \ + $(riscv_insn_ext_v_amo) \ + $(riscv_insn_ext_v_ctrl) \ + $(riscv_insn_ext_v_ldst) \ + +riscv_insn_ext_h = \ + hfence_gvma \ + hfence_vvma \ + hlv_b \ + hlv_bu \ + hlv_h \ + hlv_hu \ + hlvx_hu \ + hlv_w \ + hlv_wu \ + hlvx_wu \ + hlv_d \ + hsv_b \ + hsv_h \ + hsv_w \ + hsv_d \ + +riscv_insn_ext_p_simd = \ + add16 \ + radd16 \ + uradd16 \ + kadd16 \ + ukadd16 \ + sub16 \ + rsub16 \ + ursub16 \ + ksub16 \ + uksub16 \ + cras16 \ + rcras16 \ + urcras16 \ + kcras16 \ + ukcras16 \ + crsa16 \ + rcrsa16 \ + urcrsa16 \ + kcrsa16 \ + ukcrsa16 \ + stas16 \ + rstas16 \ + urstas16 \ + kstas16 \ + ukstas16 \ + stsa16 \ + rstsa16 \ + urstsa16 \ + kstsa16 \ + ukstsa16 \ + add8 \ + radd8 \ + uradd8 \ + kadd8 \ + ukadd8 \ + sub8 \ + rsub8 \ + ursub8 \ + ksub8 \ + uksub8 \ + sra16 \ + srai16 \ + sra16_u \ + srai16_u \ + srl16 \ + srli16 \ + srl16_u \ + srli16_u \ + sll16 \ + slli16 \ + ksll16 \ + kslli16 \ + kslra16 \ + kslra16_u \ + sra8 \ + srai8 \ + sra8_u \ + srai8_u \ + srl8 \ + srli8 \ + srl8_u \ + srli8_u \ + sll8 \ + slli8 \ + ksll8 \ + kslli8 \ + kslra8 \ + kslra8_u \ + cmpeq16 \ + scmplt16 \ + scmple16 \ + ucmplt16 \ + ucmple16 \ + cmpeq8 \ + scmplt8 \ + scmple8 \ + ucmplt8 \ + ucmple8 \ + smul16 \ + smulx16 \ + umul16 \ + umulx16 \ + khm16 \ + khmx16 \ + smul8 \ + smulx8 \ + umul8 \ + umulx8 \ + khm8 \ + khmx8 \ + smin16 \ + umin16 \ + smax16 \ + umax16 \ + sclip16 \ + uclip16 \ + kabs16 \ + clrs16 \ + clz16 \ + smin8 \ + umin8 \ + smax8 \ + umax8 \ + sclip8 \ + uclip8 \ + kabs8 \ + clrs8 \ + clz8 \ + sunpkd810 \ + sunpkd820 \ + sunpkd830 \ + sunpkd831 \ + sunpkd832 \ + zunpkd810 \ + zunpkd820 \ + zunpkd830 \ + zunpkd831 \ + zunpkd832 \ + +riscv_insn_ext_p_partial_simd = \ + pkbb16 \ + pkbt16 \ + pktb16 \ + pktt16 \ + smmul \ + smmul_u \ + kmmac \ + kmmac_u \ + kmmsb \ + kmmsb_u \ + kwmmul \ + kwmmul_u \ + smmwb \ + smmwb_u \ + smmwt \ + smmwt_u \ + kmmawb \ + kmmawb_u \ + kmmawt \ + kmmawt_u \ + kmmwb2 \ + kmmwb2_u \ + kmmwt2 \ + kmmwt2_u \ + kmmawb2 \ + kmmawb2_u \ + kmmawt2 \ + kmmawt2_u \ + smbb16 \ + smbt16 \ + smtt16 \ + kmda \ + kmxda \ + smds \ + smdrs \ + smxds \ + kmabb \ + kmabt \ + kmatt \ + kmada \ + kmaxda \ + kmads \ + kmadrs \ + kmaxds \ + kmsda \ + kmsxda \ + smal \ + sclip32 \ + uclip32 \ + clrs32 \ + pbsad \ + pbsada \ + smaqa \ + umaqa \ + smaqa_su \ + +riscv_insn_ext_p_64_bit_profile = \ + add64 \ + radd64 \ + uradd64 \ + kadd64 \ + ukadd64 \ + sub64 \ + rsub64 \ + ursub64 \ + ksub64 \ + uksub64 \ + smar64 \ + smsr64 \ + umar64 \ + umsr64 \ + kmar64 \ + kmsr64 \ + ukmar64 \ + ukmsr64 \ + smalbb \ + smalbt \ + smaltt \ + smalda \ + smalxda \ + smalds \ + smaldrs \ + smalxds \ + smslda \ + smslxda \ + +riscv_insn_ext_p_non_simd = \ + kaddh \ + ksubh \ + khmbb \ + khmbt \ + khmtt \ + ukaddh \ + uksubh 
\ + kaddw \ + ukaddw \ + ksubw \ + uksubw \ + kdmbb \ + kdmbt \ + kdmtt \ + kslraw \ + kslraw_u \ + ksllw \ + kslliw \ + kdmabb \ + kdmabt \ + kdmatt \ + kabsw \ + raddw \ + uraddw \ + rsubw \ + ursubw \ + mulr64 \ + mulsr64 \ + msubr32 \ + ave \ + sra_u \ + srai_u \ + insb \ + maddr32 \ + +riscv_insn_ext_p_rv64_only = \ + add32 \ + radd32 \ + uradd32 \ + kadd32 \ + ukadd32 \ + sub32 \ + rsub32 \ + ursub32 \ + ksub32 \ + uksub32 \ + cras32 \ + rcras32 \ + urcras32 \ + kcras32 \ + ukcras32 \ + crsa32 \ + rcrsa32 \ + urcrsa32 \ + kcrsa32 \ + ukcrsa32 \ + stas32 \ + rstas32 \ + urstas32 \ + kstas32 \ + ukstas32 \ + stsa32 \ + rstsa32 \ + urstsa32 \ + kstsa32 \ + ukstsa32 \ + sra32 \ + srai32 \ + sra32_u \ + srai32_u \ + srl32 \ + srli32 \ + srl32_u \ + srli32_u \ + sll32 \ + slli32 \ + ksll32 \ + kslli32 \ + kslra32 \ + kslra32_u \ + smin32 \ + umin32 \ + smax32 \ + umax32 \ + kabs32 \ + khmbb16 \ + khmbt16 \ + khmtt16 \ + kdmbb16 \ + kdmbt16 \ + kdmtt16 \ + kdmabb16 \ + kdmabt16 \ + kdmatt16 \ + smbt32 \ + smtt32 \ + kmabb32 \ + kmabt32 \ + kmatt32 \ + kmda32 \ + kmxda32 \ + kmaxda32 \ + kmads32 \ + kmadrs32 \ + kmaxds32 \ + kmsda32 \ + kmsxda32 \ + smds32 \ + smdrs32 \ + smxds32 \ + sraiw_u \ + pkbt32 \ + pktb32 \ + clz32 \ + +riscv_insn_ext_p = \ + $(riscv_insn_ext_p_simd) \ + $(riscv_insn_ext_p_partial_simd) \ + $(riscv_insn_ext_p_64_bit_profile) \ + $(riscv_insn_ext_p_non_simd) \ + $(riscv_insn_ext_p_rv64_only) \ + +riscv_insn_priv = \ + csrrc \ + csrrci \ + csrrs \ + csrrsi \ + csrrw \ + csrrwi \ + dret \ + ebreak \ + ecall \ + mret \ + sfence_vma \ + sret \ + wfi \ + +riscv_insn_svinval = \ + sfence_w_inval \ + sfence_inval_ir \ + sinval_vma \ + hinval_vvma \ + hinval_gvma \ + +riscv_insn_ext_cmo = \ + cbo_clean \ + cbo_flush \ + cbo_inval \ + cbo_zero \ + +riscv_insn_list = \ + $(riscv_insn_ext_a) \ + $(riscv_insn_ext_c) \ + $(riscv_insn_ext_i) \ + $(riscv_insn_ext_m) \ + $(riscv_insn_ext_f) \ + $(riscv_insn_ext_d) \ + $(riscv_insn_ext_zfh) \ + $(riscv_insn_ext_q) \ + $(riscv_insn_ext_b) \ + $(riscv_insn_ext_k) \ + $(if $(HAVE_INT128),$(riscv_insn_ext_v),) \ + $(riscv_insn_ext_h) \ + $(riscv_insn_ext_p) \ + $(riscv_insn_priv) \ + $(riscv_insn_svinval) \ + $(riscv_insn_ext_cmo) \ + +riscv_gen_srcs = \ + $(addsuffix .cc,$(riscv_insn_list)) + +insn_list.h: $(src_dir)/riscv/riscv.mk.in + for insn in $(foreach insn,$(riscv_insn_list),$(subst .,_,$(insn))) ; do \ + printf 'DEFINE_INSN(%s)\n' "$${insn}" ; \ + done > $@.tmp + mv $@.tmp $@ + +$(riscv_gen_srcs): %.cc: insns/%.h insn_template.cc + sed 's/NAME/$(subst .cc,,$@)/' $(src_dir)/riscv/insn_template.cc | sed 's/OPCODE/$(call get_opcode,$(src_dir)/riscv/encoding.h,$(subst .cc,,$@))/' > $@ + +riscv_junk = \ + $(riscv_gen_srcs) \ diff --git a/vendor/riscv-isa-sim/riscv/rocc.cc b/vendor/riscv-isa-sim/riscv/rocc.cc new file mode 100644 index 00000000..2d090952 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/rocc.cc @@ -0,0 +1,46 @@ +// See LICENSE for license details. + +#include "rocc.h" +#include "trap.h" +#include + +#define customX(n) \ + static reg_t c##n(processor_t* p, insn_t insn, reg_t pc) \ + { \ + rocc_t* rocc = static_cast(p->get_extension()); \ + rocc_insn_union_t u; \ + u.i = insn; \ + reg_t xs1 = u.r.xs1 ? RS1 : -1; \ + reg_t xs2 = u.r.xs2 ? 
RS2 : -1; \ + reg_t xd = rocc->custom##n(u.r, xs1, xs2); \ + if (u.r.xd) \ + WRITE_RD(xd); \ + return pc+4; \ + } \ + \ + reg_t rocc_t::custom##n(rocc_insn_t insn, reg_t xs1, reg_t xs2) \ + { \ + illegal_instruction(); \ + return 0; \ + } + +customX(0) +customX(1) +customX(2) +customX(3) + +std::vector rocc_t::get_instructions() +{ + std::vector insns; + insns.push_back((insn_desc_t){true, 0x0b, 0x7f, &::illegal_instruction, c0, &::illegal_instruction, c0}); + insns.push_back((insn_desc_t){true, 0x2b, 0x7f, &::illegal_instruction, c1, &::illegal_instruction, c1}); + insns.push_back((insn_desc_t){true, 0x5b, 0x7f, &::illegal_instruction, c2, &::illegal_instruction, c2}); + insns.push_back((insn_desc_t){true, 0x7b, 0x7f, &::illegal_instruction, c3, &::illegal_instruction, c3}); + return insns; +} + +std::vector rocc_t::get_disasms() +{ + std::vector insns; + return insns; +} diff --git a/vendor/riscv-isa-sim/riscv/rocc.h b/vendor/riscv-isa-sim/riscv/rocc.h new file mode 100644 index 00000000..1a522ab4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/rocc.h @@ -0,0 +1,61 @@ +#ifndef _RISCV_ROCC_H +#define _RISCV_ROCC_H + +#include "extension.h" + +struct rocc_insn_t +{ + unsigned opcode : 7; + unsigned rd : 5; + unsigned xs2 : 1; + unsigned xs1 : 1; + unsigned xd : 1; + unsigned rs1 : 5; + unsigned rs2 : 5; + unsigned funct : 7; +}; + +union rocc_insn_union_t +{ + rocc_insn_t r; + insn_t i; +}; + +class rocc_t : public extension_t +{ + public: + virtual reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t xs2); + virtual reg_t custom1(rocc_insn_t insn, reg_t xs1, reg_t xs2); + virtual reg_t custom2(rocc_insn_t insn, reg_t xs1, reg_t xs2); + virtual reg_t custom3(rocc_insn_t insn, reg_t xs1, reg_t xs2); + std::vector get_instructions(); + std::vector get_disasms(); +}; + +#define define_custom_func(type_name, ext_name_str, func_name, method_name) \ + static reg_t func_name(processor_t* p, insn_t insn, reg_t pc) \ + { \ + type_name* rocc = static_cast(p->get_extension(ext_name_str)); \ + rocc_insn_union_t u; \ + u.i = insn; \ + reg_t xs1 = u.r.xs1 ? RS1 : -1; \ + reg_t xs2 = u.r.xs2 ? RS2 : -1; \ + reg_t xd = rocc->method_name(u.r, xs1, xs2); \ + if (u.r.xd) \ + WRITE_RD(xd); \ + return pc+4; \ + } \ + +#define push_custom_insn(insn_list, opcode, opcode_mask, func_name_32, func_name_64) \ + insn_list.push_back((insn_desc_t){opcode, opcode_mask, func_name_32, func_name_64}) + +#define ILLEGAL_INSN_FUNC &::illegal_instruction + +#define ROCC_OPCODE0 0x0b +#define ROCC_OPCODE1 0x2b +#define ROCC_OPCODE2 0x5b +#define ROCC_OPCODE3 0x7b + +#define ROCC_OPCODE_MASK 0x7f + +#endif diff --git a/vendor/riscv-isa-sim/riscv/rom.cc b/vendor/riscv-isa-sim/riscv/rom.cc new file mode 100644 index 00000000..b8528621 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/rom.cc @@ -0,0 +1,19 @@ +#include "devices.h" + +rom_device_t::rom_device_t(std::vector data) + : data(data) +{ +} + +bool rom_device_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + if (addr + len > data.size()) + return false; + memcpy(bytes, &data[addr], len); + return true; +} + +bool rom_device_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + return false; +} diff --git a/vendor/riscv-isa-sim/riscv/sim.cc b/vendor/riscv-isa-sim/riscv/sim.cc new file mode 100644 index 00000000..069e1b51 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/sim.cc @@ -0,0 +1,438 @@ +// See LICENSE for license details. 
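+
+// A rough construction sketch (values are illustrative; the spike front end
+// is the real call site): build a cfg_t and the backing memories, construct
+// the sim_t, then let run() hand control to the HTIF event loop:
+//
+//   sim_t sim(&cfg, /*halted=*/false, mems, plugin_devices, htif_args,
+//             dm_config, /*log_path=*/nullptr, /*dtb_enabled=*/true,
+//             /*dtb_file=*/nullptr, /*cmd_file=*/nullptr);
+//   return sim.run();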
+ +#include "sim.h" +#include "mmu.h" +#include "dts.h" +#include "remote_bitbang.h" +#include "byteorder.h" +#include "platform.h" +#include "libfdt.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +volatile bool ctrlc_pressed = false; +static void handle_signal(int sig) +{ + if (ctrlc_pressed) + exit(-1); + ctrlc_pressed = true; + signal(sig, &handle_signal); +} + +sim_t::sim_t(const cfg_t *cfg, bool halted, + std::vector> mems, + std::vector> plugin_devices, + const std::vector& args, + const debug_module_config_t &dm_config, + const char *log_path, + bool dtb_enabled, const char *dtb_file, +#ifdef HAVE_BOOST_ASIO + boost::asio::io_service *io_service_ptr, boost::asio::ip::tcp::acceptor *acceptor_ptr, // option -s +#endif + FILE *cmd_file) // needed for command line option --cmd + : htif_t(args), + isa(cfg->isa(), cfg->priv()), + cfg(cfg), + mems(mems), + plugin_devices(plugin_devices), + procs(std::max(cfg->nprocs(), size_t(1))), + dtb_file(dtb_file ? dtb_file : ""), + dtb_enabled(dtb_enabled), + log_file(log_path), + cmd_file(cmd_file), +#ifdef HAVE_BOOST_ASIO + io_service_ptr(io_service_ptr), // socket interface + acceptor_ptr(acceptor_ptr), +#endif + sout_(nullptr), + current_step(0), + current_proc(0), + debug(false), + histogram_enabled(false), + log(false), + remote_bitbang(NULL), + debug_module(this, dm_config) +{ + signal(SIGINT, &handle_signal); + + sout_.rdbuf(std::cerr.rdbuf()); // debug output goes to stderr by default + + for (auto& x : mems) + bus.add_device(x.first, x.second); + + for (auto& x : plugin_devices) + bus.add_device(x.first, x.second); + + debug_module.add_device(&bus); + + debug_mmu = new mmu_t(this, NULL); + + for (size_t i = 0; i < cfg->nprocs(); i++) { + procs[i] = new processor_t(&isa, cfg->varch(), this, cfg->hartids()[i], halted, + log_file.get(), sout_); + } + + make_dtb(); + + void *fdt = (void *)dtb.c_str(); + + // Only make a CLINT (Core-Local INTerrupt controller) if one is specified in + // the device tree configuration. + // + // This isn't *quite* as general as we could get (because you might have one + // that's not bus-accessible), but it should handle the normal use cases. In + // particular, the default device tree configuration that you get without + // setting the dtb_file argument has one. 
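+  //
+  // For reference, the node this probes for in the device tree looks roughly
+  // like (addresses are illustrative):
+  //
+  //   clint@2000000 {
+  //     compatible = "riscv,clint0";
+  //     reg = <0x0 0x2000000 0x0 0xc0000>;
+  //   };
+  //
+  // fdt_parse_clint() returns 0 and reports the base address from `reg` when
+  // such a node exists.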
+ reg_t clint_base; + if (fdt_parse_clint(fdt, &clint_base, "riscv,clint0") == 0) { + clint.reset(new clint_t(procs, CPU_HZ / INSNS_PER_RTC_TICK, cfg->real_time_clint())); + bus.add_device(clint_base, clint.get()); + } + + //per core attribute + int cpu_offset = 0, rc; + size_t cpu_idx = 0; + cpu_offset = fdt_get_offset(fdt, "/cpus"); + if (cpu_offset < 0) + return; + + for (cpu_offset = fdt_get_first_subnode(fdt, cpu_offset); cpu_offset >= 0; + cpu_offset = fdt_get_next_subnode(fdt, cpu_offset)) { + + if (cpu_idx >= nprocs()) + break; + + //handle pmp + reg_t pmp_num = 0, pmp_granularity = 0; + if (fdt_parse_pmp_num(fdt, cpu_offset, &pmp_num) == 0) { + if (pmp_num <= 64) { + procs[cpu_idx]->set_pmp_num(pmp_num); + } else { + std::cerr << "core (" + << cpu_idx + << ") doesn't have valid 'riscv,pmpregions'" + << pmp_num << ").\n"; + exit(1); + } + } else { + procs[cpu_idx]->set_pmp_num(0); + } + + if (fdt_parse_pmp_alignment(fdt, cpu_offset, &pmp_granularity) == 0) { + procs[cpu_idx]->set_pmp_granularity(pmp_granularity); + } + + //handle mmu-type + const char *mmu_type; + rc = fdt_parse_mmu_type(fdt, cpu_offset, &mmu_type); + if (rc == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SBARE); + if (strncmp(mmu_type, "riscv,sv32", strlen("riscv,sv32")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV32); + } else if (strncmp(mmu_type, "riscv,sv39", strlen("riscv,sv39")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV39); + } else if (strncmp(mmu_type, "riscv,sv48", strlen("riscv,sv48")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV48); + } else if (strncmp(mmu_type, "riscv,sv57", strlen("riscv,sv57")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV57); + } else if (strncmp(mmu_type, "riscv,sbare", strlen("riscv,sbare")) == 0) { + //has been set in the beginning + } else { + std::cerr << "core (" + << cpu_idx + << ") has an invalid 'mmu-type': " + << mmu_type << ").\n"; + exit(1); + } + } else { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SBARE); + } + + cpu_idx++; + } + + if (cpu_idx != nprocs()) { + std::cerr << "core number in dts (" + << cpu_idx + << ") doesn't match it in command line (" + << nprocs() << ").\n"; + exit(1); + } +} + +sim_t::~sim_t() +{ + for (size_t i = 0; i < procs.size(); i++) + delete procs[i]; + delete debug_mmu; +} + +void sim_thread_main(void* arg) +{ + ((sim_t*)arg)->main(); +} + +void sim_t::main() +{ + if (!debug && log) + set_procs_debug(true); + + while (!done()) + { + if (debug || ctrlc_pressed) + interactive(); + else + step(INTERLEAVE); + if (remote_bitbang) { + remote_bitbang->tick(); + } + } +} + +int sim_t::run() +{ + host = context_t::current(); + target.init(sim_thread_main, this); + return htif_t::run(); +} + +void sim_t::step(size_t n) +{ + for (size_t i = 0, steps = 0; i < n; i += steps) + { + steps = std::min(n - i, INTERLEAVE - current_step); + procs[current_proc]->step(steps); + + current_step += steps; + if (current_step == INTERLEAVE) + { + current_step = 0; + procs[current_proc]->get_mmu()->yield_load_reservation(); + if (++current_proc == procs.size()) { + current_proc = 0; + if (clint) clint->increment(INTERLEAVE / INSNS_PER_RTC_TICK); + } + + host->switch_to(); + } + } +} + +void sim_t::set_debug(bool value) +{ + debug = value; +} + +void sim_t::set_histogram(bool value) +{ + histogram_enabled = value; + for (size_t i = 0; i < procs.size(); i++) { + procs[i]->set_histogram(histogram_enabled); + } +} + +void sim_t::configure_log(bool enable_log, bool enable_commitlog) +{ + log = enable_log; + + if 
(!enable_commitlog) + return; + +#ifndef RISCV_ENABLE_COMMITLOG + fputs("Commit logging support has not been properly enabled; " + "please re-build the riscv-isa-sim project using " + "\"configure --enable-commitlog\".\n", + stderr); + abort(); +#else + for (processor_t *proc : procs) { + proc->enable_log_commits(); + } +#endif +} + +void sim_t::set_procs_debug(bool value) +{ + for (size_t i=0; i< procs.size(); i++) + procs[i]->set_debug(value); +} + +static bool paddr_ok(reg_t addr) +{ + return (addr >> MAX_PADDR_BITS) == 0; +} + +bool sim_t::mmio_load(reg_t addr, size_t len, uint8_t* bytes) +{ + if (addr + len < addr || !paddr_ok(addr + len - 1)) + return false; + return bus.load(addr, len, bytes); +} + +bool sim_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) +{ + if (addr + len < addr || !paddr_ok(addr + len - 1)) + return false; + return bus.store(addr, len, bytes); +} + +void sim_t::make_dtb() +{ + if (!dtb_file.empty()) { + std::ifstream fin(dtb_file.c_str(), std::ios::binary); + if (!fin.good()) { + std::cerr << "can't find dtb file: " << dtb_file << std::endl; + exit(-1); + } + + std::stringstream strstream; + strstream << fin.rdbuf(); + + dtb = strstream.str(); + } else { + std::pair initrd_bounds = cfg->initrd_bounds(); + dts = make_dts(INSNS_PER_RTC_TICK, CPU_HZ, + initrd_bounds.first, initrd_bounds.second, + cfg->bootargs(), procs, mems); + dtb = dts_compile(dts); + } + + int fdt_code = fdt_check_header(dtb.c_str()); + if (fdt_code) { + std::cerr << "Failed to read DTB from "; + if (dtb_file.empty()) { + std::cerr << "auto-generated DTS string"; + } else { + std::cerr << "`" << dtb_file << "'"; + } + std::cerr << ": " << fdt_strerror(fdt_code) << ".\n"; + exit(-1); + } +} + +void sim_t::set_rom() +{ + const int reset_vec_size = 8; + + reg_t start_pc = cfg->start_pc.value_or(get_entry_point()); + + uint32_t reset_vec[reset_vec_size] = { + 0x297, // auipc t0,0x0 + 0x28593 + (reset_vec_size * 4 << 20), // addi a1, t0, &dtb + 0xf1402573, // csrr a0, mhartid + get_core(0)->get_xlen() == 32 ? 
+ 0x0182a283u : // lw t0,24(t0) + 0x0182b283u, // ld t0,24(t0) + 0x28067, // jr t0 + 0, + (uint32_t) (start_pc & 0xffffffff), + (uint32_t) (start_pc >> 32) + }; + if (get_target_endianness() == memif_endianness_big) { + int i; + // Instuctions are little endian + for (i = 0; reset_vec[i] != 0; i++) + reset_vec[i] = to_le(reset_vec[i]); + // Data is big endian + for (; i < reset_vec_size; i++) + reset_vec[i] = to_be(reset_vec[i]); + + // Correct the high/low order of 64-bit start PC + if (get_core(0)->get_xlen() != 32) + std::swap(reset_vec[reset_vec_size-2], reset_vec[reset_vec_size-1]); + } else { + for (int i = 0; i < reset_vec_size; i++) + reset_vec[i] = to_le(reset_vec[i]); + } + + std::vector rom((char*)reset_vec, (char*)reset_vec + sizeof(reset_vec)); + + rom.insert(rom.end(), dtb.begin(), dtb.end()); + const int align = 0x1000; + rom.resize((rom.size() + align - 1) / align * align); + + boot_rom.reset(new rom_device_t(rom)); + bus.add_device(DEFAULT_RSTVEC, boot_rom.get()); +} + +char* sim_t::addr_to_mem(reg_t addr) { + if (!paddr_ok(addr)) + return NULL; + auto desc = bus.find_device(addr); + if (auto mem = dynamic_cast(desc.second)) + if (addr - desc.first < mem->size()) + return mem->contents(addr - desc.first); + return NULL; +} + +const char* sim_t::get_symbol(uint64_t addr) +{ + return htif_t::get_symbol(addr); +} + +// htif + +void sim_t::reset() +{ + if (dtb_enabled) + set_rom(); +} + +void sim_t::idle() +{ + target.switch_to(); +} + +void sim_t::read_chunk(addr_t taddr, size_t len, void* dst) +{ + assert(len == 8); + auto data = debug_mmu->to_target(debug_mmu->load_uint64(taddr)); + memcpy(dst, &data, sizeof data); +} + +void sim_t::write_chunk(addr_t taddr, size_t len, const void* src) +{ + assert(len == 8); + target_endian data; + memcpy(&data, src, sizeof data); + debug_mmu->store_uint64(taddr, debug_mmu->from_target(data)); +} + +void sim_t::set_target_endianness(memif_endianness_t endianness) +{ +#ifdef RISCV_ENABLE_DUAL_ENDIAN + assert(endianness == memif_endianness_little || endianness == memif_endianness_big); + + bool enable = endianness == memif_endianness_big; + debug_mmu->set_target_big_endian(enable); + for (size_t i = 0; i < procs.size(); i++) { + procs[i]->get_mmu()->set_target_big_endian(enable); + procs[i]->reset(); + } +#else + assert(endianness == memif_endianness_little); +#endif +} + +memif_endianness_t sim_t::get_target_endianness() const +{ +#ifdef RISCV_ENABLE_DUAL_ENDIAN + return debug_mmu->is_target_big_endian()? memif_endianness_big : memif_endianness_little; +#else + return memif_endianness_little; +#endif +} + +void sim_t::proc_reset(unsigned id) +{ + debug_module.proc_reset(id); +} diff --git a/vendor/riscv-isa-sim/riscv/sim.h b/vendor/riscv-isa-sim/riscv/sim.h new file mode 100644 index 00000000..97cada13 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/sim.h @@ -0,0 +1,175 @@ +// See LICENSE for license details. + +#ifndef _RISCV_SIM_H +#define _RISCV_SIM_H + +#include "config.h" + +#ifdef HAVE_BOOST_ASIO +#include +#include +#include +#endif + +#include "cfg.h" +#include "debug_module.h" +#include "devices.h" +#include "log_file.h" +#include "processor.h" +#include "simif.h" + +#include +#include +#include +#include +#include +#include + +class mmu_t; +class remote_bitbang_t; + +// this class encapsulates the processors and memory in a RISC-V machine. 
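+// It is simultaneously the htif_t (the host-target interface that drives the
+// simulation from run()) and the simif_t that the cores use for memory and
+// MMIO access; step() below round-robins the harts in INTERLEAVE-sized
+// instruction slices.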
+class sim_t : public htif_t, public simif_t +{ +public: + sim_t(const cfg_t *cfg, bool halted, + std::vector> mems, + std::vector> plugin_devices, + const std::vector& args, + const debug_module_config_t &dm_config, const char *log_path, + bool dtb_enabled, const char *dtb_file, +#ifdef HAVE_BOOST_ASIO + boost::asio::io_service *io_service_ptr_ctor, boost::asio::ip::tcp::acceptor *acceptor_ptr_ctor, // option -s +#endif + FILE *cmd_file); // needed for command line option --cmd + ~sim_t(); + + // run the simulation to completion + int run(); + void set_debug(bool value); + void set_histogram(bool value); + + // Configure logging + // + // If enable_log is true, an instruction trace will be generated. If + // enable_commitlog is true, so will the commit results (if this + // build was configured without support for commit logging, the + // function will print an error message and abort). + void configure_log(bool enable_log, bool enable_commitlog); + + void set_procs_debug(bool value); + void set_remote_bitbang(remote_bitbang_t* remote_bitbang) { + this->remote_bitbang = remote_bitbang; + } + const char* get_dts() { if (dts.empty()) reset(); return dts.c_str(); } + processor_t* get_core(size_t i) { return procs.at(i); } + unsigned nprocs() const { return procs.size(); } + + // Callback for processors to let the simulation know they were reset. + void proc_reset(unsigned id); + +private: + isa_parser_t isa; + const cfg_t * const cfg; + std::vector> mems; + std::vector> plugin_devices; + mmu_t* debug_mmu; // debug port into main memory + std::vector procs; + std::pair initrd_range; + std::string dts; + std::string dtb; + std::string dtb_file; + bool dtb_enabled; + std::unique_ptr boot_rom; + std::unique_ptr clint; + bus_t bus; + log_file_t log_file; + + FILE *cmd_file; // pointer to debug command input file + +#ifdef HAVE_BOOST_ASIO + // the following are needed for command socket interface + boost::asio::io_service *io_service_ptr; + boost::asio::ip::tcp::acceptor *acceptor_ptr; + std::unique_ptr socket_ptr; + std::string rin(boost::asio::streambuf *bout_ptr); // read input command string + void wout(boost::asio::streambuf *bout_ptr); // write output to socket +#endif + std::ostream sout_; // used for socket and terminal interface + + processor_t* get_core(const std::string& i); + void step(size_t n); // step through simulation + static const size_t INTERLEAVE = 5000; + static const size_t INSNS_PER_RTC_TICK = 100; // 10 MHz clock for 1 BIPS core + static const size_t CPU_HZ = 1000000000; // 1GHz CPU + size_t current_step; + size_t current_proc; + bool debug; + bool histogram_enabled; // provide a histogram of PCs + bool log; + remote_bitbang_t* remote_bitbang; + + // memory-mapped I/O routines + char* addr_to_mem(reg_t addr); + bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); + bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); + void make_dtb(); + void set_rom(); + + const char* get_symbol(uint64_t addr); + + // presents a prompt for introspection into the simulation + void interactive(); + + // functions that help implement interactive() + void interactive_help(const std::string& cmd, const std::vector& args); + void interactive_quit(const std::string& cmd, const std::vector& args); + void interactive_run(const std::string& cmd, const std::vector& args, bool noisy); + void interactive_run_noisy(const std::string& cmd, const std::vector& args); + void interactive_run_silent(const std::string& cmd, const std::vector& args); + void interactive_vreg(const std::string& cmd, 
const std::vector& args); + void interactive_reg(const std::string& cmd, const std::vector& args); + void interactive_freg(const std::string& cmd, const std::vector& args); + void interactive_fregh(const std::string& cmd, const std::vector& args); + void interactive_fregs(const std::string& cmd, const std::vector& args); + void interactive_fregd(const std::string& cmd, const std::vector& args); + void interactive_pc(const std::string& cmd, const std::vector& args); + void interactive_mem(const std::string& cmd, const std::vector& args); + void interactive_str(const std::string& cmd, const std::vector& args); + void interactive_until(const std::string& cmd, const std::vector& args, bool noisy); + void interactive_until_silent(const std::string& cmd, const std::vector& args); + void interactive_until_noisy(const std::string& cmd, const std::vector& args); + reg_t get_reg(const std::vector& args); + freg_t get_freg(const std::vector& args); + reg_t get_mem(const std::vector& args); + reg_t get_pc(const std::vector& args); + + friend class processor_t; + friend class mmu_t; + friend class debug_module_t; + + // htif + friend void sim_thread_main(void*); + void main(); + + context_t* host; + context_t target; + void reset(); + void idle(); + void read_chunk(addr_t taddr, size_t len, void* dst); + void write_chunk(addr_t taddr, size_t len, const void* src); + size_t chunk_align() { return 8; } + size_t chunk_max_size() { return 8; } + void set_target_endianness(memif_endianness_t endianness); + memif_endianness_t get_target_endianness() const; + +public: + // Initialize this after procs, because in debug_module_t::reset() we + // enumerate processors, which segfaults if procs hasn't been initialized + // yet. + debug_module_t debug_module; +}; + +extern volatile bool ctrlc_pressed; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/simif.h b/vendor/riscv-isa-sim/riscv/simif.h new file mode 100644 index 00000000..0e75d45b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/simif.h @@ -0,0 +1,24 @@ +// See LICENSE for license details. + +#ifndef _RISCV_SIMIF_H +#define _RISCV_SIMIF_H + +#include "decode.h" + +// this is the interface to the simulator used by the processors and memory +class simif_t +{ +public: + // should return NULL for MMIO addresses + virtual char* addr_to_mem(reg_t addr) = 0; + // used for MMIO addresses + virtual bool mmio_load(reg_t addr, size_t len, uint8_t* bytes) = 0; + virtual bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes) = 0; + // Callback for processors to let the simulation know they were reset. + virtual void proc_reset(unsigned id) = 0; + + virtual const char* get_symbol(uint64_t addr) = 0; + +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/tracer.h b/vendor/riscv-isa-sim/riscv/tracer.h new file mode 100644 index 00000000..9f1bc784 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/tracer.h @@ -0,0 +1,11 @@ +// See LICENSE for license details. + +#ifndef _RISCV_TRACER_H +#define _RISCV_TRACER_H + +#include "processor.h" + +static inline void trace_opcode(processor_t* p, insn_bits_t opc, insn_t insn) { +} + +#endif diff --git a/vendor/riscv-isa-sim/riscv/trap.h b/vendor/riscv-isa-sim/riscv/trap.h new file mode 100644 index 00000000..1cd62e15 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/trap.h @@ -0,0 +1,116 @@ +// See LICENSE for license details. 
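+
+// Usage sketch (illustrative): execution code raises these classes as C++
+// exceptions, e.g. `throw trap_load_access_fault(/*gva=*/false, addr, 0, 0);`,
+// and the catch site uses cause(), get_tval() and friends to fill in the trap
+// CSRs.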
+ +#ifndef _RISCV_TRAP_H +#define _RISCV_TRAP_H + +#include "decode.h" +#include + +struct state_t; + +class trap_t +{ + public: + trap_t(reg_t which) : which(which) {} + virtual bool has_gva() { return false; } + virtual bool has_tval() { return false; } + virtual reg_t get_tval() { return 0; } + virtual bool has_tval2() { return false; } + virtual reg_t get_tval2() { return 0; } + virtual bool has_tinst() { return false; } + virtual reg_t get_tinst() { return 0; } + reg_t cause() { return which; } + + virtual const char* name() + { + const char* fmt = uint8_t(which) == which ? "trap #%u" : "interrupt #%u"; + sprintf(_name, fmt, uint8_t(which)); + return _name; + } + + private: + char _name[16]; + reg_t which; +}; + +class insn_trap_t : public trap_t +{ + public: + insn_trap_t(reg_t which, bool gva, reg_t tval) + : trap_t(which), gva(gva), tval(tval) {} + bool has_gva() override { return gva; } + bool has_tval() override { return true; } + reg_t get_tval() override { return tval; } + private: + bool gva; + reg_t tval; +}; + +class mem_trap_t : public trap_t +{ + public: + mem_trap_t(reg_t which, bool gva, reg_t tval, reg_t tval2, reg_t tinst) + : trap_t(which), gva(gva), tval(tval), tval2(tval2), tinst(tinst) {} + bool has_gva() override { return gva; } + bool has_tval() override { return true; } + reg_t get_tval() override { return tval; } + bool has_tval2() override { return true; } + reg_t get_tval2() override { return tval2; } + bool has_tinst() override { return true; } + reg_t get_tinst() override { return tinst; } + private: + bool gva; + reg_t tval, tval2, tinst; +}; + +#define DECLARE_TRAP(n, x) class trap_##x : public trap_t { \ + public: \ + trap_##x() : trap_t(n) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_INST_TRAP(n, x) class trap_##x : public insn_trap_t { \ + public: \ + trap_##x(reg_t tval) : insn_trap_t(n, /*gva*/false, tval) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_INST_WITH_GVA_TRAP(n, x) class trap_##x : public insn_trap_t { \ + public: \ + trap_##x(bool gva, reg_t tval) : insn_trap_t(n, gva, tval) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_MEM_TRAP(n, x) class trap_##x : public mem_trap_t { \ + public: \ + trap_##x(bool gva, reg_t tval, reg_t tval2, reg_t tinst) : mem_trap_t(n, gva, tval, tval2, tinst) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_MEM_GVA_TRAP(n, x) class trap_##x : public mem_trap_t { \ + public: \ + trap_##x(reg_t tval, reg_t tval2, reg_t tinst) : mem_trap_t(n, true, tval, tval2, tinst) {} \ + const char* name() { return "trap_"#x; } \ +}; + +DECLARE_MEM_TRAP(CAUSE_MISALIGNED_FETCH, instruction_address_misaligned) +DECLARE_MEM_TRAP(CAUSE_FETCH_ACCESS, instruction_access_fault) +DECLARE_INST_TRAP(CAUSE_ILLEGAL_INSTRUCTION, illegal_instruction) +DECLARE_INST_WITH_GVA_TRAP(CAUSE_BREAKPOINT, breakpoint) +DECLARE_MEM_TRAP(CAUSE_MISALIGNED_LOAD, load_address_misaligned) +DECLARE_MEM_TRAP(CAUSE_MISALIGNED_STORE, store_address_misaligned) +DECLARE_MEM_TRAP(CAUSE_LOAD_ACCESS, load_access_fault) +DECLARE_MEM_TRAP(CAUSE_STORE_ACCESS, store_access_fault) +DECLARE_TRAP(CAUSE_USER_ECALL, user_ecall) +DECLARE_TRAP(CAUSE_SUPERVISOR_ECALL, supervisor_ecall) +DECLARE_TRAP(CAUSE_VIRTUAL_SUPERVISOR_ECALL, virtual_supervisor_ecall) +DECLARE_TRAP(CAUSE_MACHINE_ECALL, machine_ecall) +DECLARE_MEM_TRAP(CAUSE_FETCH_PAGE_FAULT, instruction_page_fault) +DECLARE_MEM_TRAP(CAUSE_LOAD_PAGE_FAULT, load_page_fault) +DECLARE_MEM_TRAP(CAUSE_STORE_PAGE_FAULT, 
store_page_fault) +DECLARE_MEM_GVA_TRAP(CAUSE_FETCH_GUEST_PAGE_FAULT, instruction_guest_page_fault) +DECLARE_MEM_GVA_TRAP(CAUSE_LOAD_GUEST_PAGE_FAULT, load_guest_page_fault) +DECLARE_INST_TRAP(CAUSE_VIRTUAL_INSTRUCTION, virtual_instruction) +DECLARE_MEM_GVA_TRAP(CAUSE_STORE_GUEST_PAGE_FAULT, store_guest_page_fault) + +#endif diff --git a/vendor/riscv-isa-sim/riscv/triggers.cc b/vendor/riscv-isa-sim/riscv/triggers.cc new file mode 100644 index 00000000..69888bf5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/triggers.cc @@ -0,0 +1,206 @@ +#include "processor.h" +#include "triggers.h" + +namespace triggers { + +mcontrol_t::mcontrol_t() : + type(2), maskmax(0), select(false), timing(false), chain_bit(false), + match(MATCH_EQUAL), m(false), h(false), s(false), u(false), + execute_bit(false), store_bit(false), load_bit(false) +{ +} + +reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { + reg_t v = 0; + auto xlen = proc->get_xlen(); + v = set_field(v, MCONTROL_TYPE(xlen), type); + v = set_field(v, MCONTROL_DMODE(xlen), dmode); + v = set_field(v, MCONTROL_MASKMAX(xlen), maskmax); + v = set_field(v, MCONTROL_SELECT, select); + v = set_field(v, MCONTROL_TIMING, timing); + v = set_field(v, MCONTROL_ACTION, action); + v = set_field(v, MCONTROL_CHAIN, chain_bit); + v = set_field(v, MCONTROL_MATCH, match); + v = set_field(v, MCONTROL_M, m); + v = set_field(v, MCONTROL_H, h); + v = set_field(v, MCONTROL_S, s); + v = set_field(v, MCONTROL_U, u); + v = set_field(v, MCONTROL_EXECUTE, execute_bit); + v = set_field(v, MCONTROL_STORE, store_bit); + v = set_field(v, MCONTROL_LOAD, load_bit); + return v; +} + +bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcept { + if (dmode && !proc->get_state()->debug_mode) { + return false; + } + auto xlen = proc->get_xlen(); + dmode = get_field(val, MCONTROL_DMODE(xlen)); + select = get_field(val, MCONTROL_SELECT); + timing = get_field(val, MCONTROL_TIMING); + action = (triggers::action_t) get_field(val, MCONTROL_ACTION); + chain_bit = get_field(val, MCONTROL_CHAIN); + unsigned match_value = get_field(val, MCONTROL_MATCH); + switch (match_value) { + case MATCH_EQUAL: + case MATCH_NAPOT: + case MATCH_GE: + case MATCH_LT: + case MATCH_MASK_LOW: + case MATCH_MASK_HIGH: + match = (triggers::mcontrol_t::match_t) match_value; + break; + default: + match = MATCH_EQUAL; + break; + } + m = get_field(val, MCONTROL_M); + h = get_field(val, MCONTROL_H); + s = get_field(val, MCONTROL_S); + u = get_field(val, MCONTROL_U); + execute_bit = get_field(val, MCONTROL_EXECUTE); + store_bit = get_field(val, MCONTROL_STORE); + load_bit = get_field(val, MCONTROL_LOAD); + // Assume we're here because of csrw. 
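+  // An execute trigger in this model always fires before the instruction
+  // (timing = 0), which is the timing the debug spec recommends, so override
+  // whatever timing value the write supplied.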
+ if (execute_bit) + timing = 0; + return true; +} + +reg_t mcontrol_t::tdata2_read(const processor_t * const proc) const noexcept { + return tdata2; +} + +bool mcontrol_t::tdata2_write(processor_t * const proc, const reg_t val) noexcept { + if (dmode && !proc->get_state()->debug_mode) { + return false; + } + tdata2 = val; + return true; +} + +bool mcontrol_t::simple_match(unsigned xlen, reg_t value) const { + switch (match) { + case triggers::mcontrol_t::MATCH_EQUAL: + return value == tdata2; + case triggers::mcontrol_t::MATCH_NAPOT: + { + reg_t mask = ~((1 << (cto(tdata2)+1)) - 1); + return (value & mask) == (tdata2 & mask); + } + case triggers::mcontrol_t::MATCH_GE: + return value >= tdata2; + case triggers::mcontrol_t::MATCH_LT: + return value < tdata2; + case triggers::mcontrol_t::MATCH_MASK_LOW: + { + reg_t mask = tdata2 >> (xlen/2); + return (value & mask) == (tdata2 & mask); + } + case triggers::mcontrol_t::MATCH_MASK_HIGH: + { + reg_t mask = tdata2 >> (xlen/2); + return ((value >> (xlen/2)) & mask) == (tdata2 & mask); + } + } + assert(0); +} + +match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, reg_t data) { + state_t * const state = proc->get_state(); + if ((operation == triggers::OPERATION_EXECUTE && !execute_bit) || + (operation == triggers::OPERATION_STORE && !store_bit) || + (operation == triggers::OPERATION_LOAD && !load_bit) || + (state->prv == PRV_M && !m) || + (state->prv == PRV_S && !s) || + (state->prv == PRV_U && !u)) { + return MATCH_NONE; + } + + reg_t value; + if (select) { + value = data; + } else { + value = address; + } + + // We need this because in 32-bit mode sometimes the PC bits get sign + // extended. + auto xlen = proc->get_xlen(); + if (xlen == 32) { + value &= 0xffffffff; + } + + if (simple_match(xlen, value)) { + if (timing) + return MATCH_FIRE_AFTER; + else + return MATCH_FIRE_BEFORE; + } + return MATCH_NONE; +} + +module_t::module_t(unsigned count) : triggers(count) { + for (unsigned i = 0; i < count; i++) { + triggers[i] = new mcontrol_t(); + } +} + +module_t::~module_t() { + for (auto trigger : triggers) { + delete trigger; + } +} + +match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, reg_t data) +{ + state_t * const state = proc->get_state(); + if (state->debug_mode) + return MATCH_NONE; + + bool chain_ok = true; + + for (unsigned int i = 0; i < triggers.size(); i++) { + if (!chain_ok) { + chain_ok |= !triggers[i]->chain(); + continue; + } + + match_result_t result = triggers[i]->memory_access_match(proc, operation, address, data); + if (result != MATCH_NONE && !triggers[i]->chain()) { + *action = triggers[i]->action; + return result; + } + + chain_ok = true; + } + return MATCH_NONE; +} + +reg_t module_t::tdata1_read(const processor_t * const proc, unsigned index) const noexcept +{ + return triggers[index]->tdata1_read(proc); +} + +bool module_t::tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept +{ + bool result = triggers[index]->tdata1_write(proc, val); + proc->trigger_updated(triggers); + return result; +} + +reg_t module_t::tdata2_read(const processor_t * const proc, unsigned index) const noexcept +{ + return triggers[index]->tdata2_read(proc); +} + +bool module_t::tdata2_write(processor_t * const proc, unsigned index, const reg_t val) noexcept +{ + bool result = triggers[index]->tdata2_write(proc, val); + proc->trigger_updated(triggers); + return result; +} + + +}; diff --git 
a/vendor/riscv-isa-sim/riscv/triggers.h b/vendor/riscv-isa-sim/riscv/triggers.h new file mode 100644 index 00000000..ad294c8a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/triggers.h @@ -0,0 +1,138 @@ +#ifndef _RISCV_TRIGGERS_H +#define _RISCV_TRIGGERS_H + +#include + +#include "decode.h" + +namespace triggers { + +typedef enum { + OPERATION_EXECUTE, + OPERATION_STORE, + OPERATION_LOAD, +} operation_t; + +typedef enum +{ + ACTION_DEBUG_EXCEPTION = MCONTROL_ACTION_DEBUG_EXCEPTION, + ACTION_DEBUG_MODE = MCONTROL_ACTION_DEBUG_MODE, + ACTION_TRACE_START = MCONTROL_ACTION_TRACE_START, + ACTION_TRACE_STOP = MCONTROL_ACTION_TRACE_STOP, + ACTION_TRACE_EMIT = MCONTROL_ACTION_TRACE_EMIT +} action_t; + +typedef enum { + MATCH_NONE, + MATCH_FIRE_BEFORE, + MATCH_FIRE_AFTER +} match_result_t; + +class matched_t +{ + public: + matched_t(triggers::operation_t operation, reg_t address, reg_t data, action_t action) : + operation(operation), address(address), data(data), action(action) {} + + triggers::operation_t operation; + reg_t address; + reg_t data; + action_t action; +}; + +class trigger_t { +public: + virtual match_result_t memory_access_match(processor_t * const proc, + operation_t operation, reg_t address, reg_t data) = 0; + + virtual reg_t tdata1_read(const processor_t * const proc) const noexcept = 0; + virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept = 0; + virtual reg_t tdata2_read(const processor_t * const proc) const noexcept = 0; + virtual bool tdata2_write(processor_t * const proc, const reg_t val) noexcept = 0; + + virtual bool chain() const { return false; } + virtual bool execute() const { return false; } + virtual bool store() const { return false; } + virtual bool load() const { return false; } + +public: + bool dmode; + action_t action; + + virtual ~trigger_t() {}; + +protected: + trigger_t() : dmode(false), action(ACTION_DEBUG_EXCEPTION) {}; +}; + +class mcontrol_t : public trigger_t { +public: + typedef enum + { + MATCH_EQUAL = MCONTROL_MATCH_EQUAL, + MATCH_NAPOT = MCONTROL_MATCH_NAPOT, + MATCH_GE = MCONTROL_MATCH_GE, + MATCH_LT = MCONTROL_MATCH_LT, + MATCH_MASK_LOW = MCONTROL_MATCH_MASK_LOW, + MATCH_MASK_HIGH = MCONTROL_MATCH_MASK_HIGH + } match_t; + + mcontrol_t(); + + virtual reg_t tdata1_read(const processor_t * const proc) const noexcept override; + virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept override; + virtual reg_t tdata2_read(const processor_t * const proc) const noexcept override; + virtual bool tdata2_write(processor_t * const proc, const reg_t val) noexcept override; + + virtual bool chain() const override { return chain_bit; } + virtual bool execute() const override { return execute_bit; } + virtual bool store() const override { return store_bit; } + virtual bool load() const override { return load_bit; } + + virtual match_result_t memory_access_match(processor_t * const proc, + operation_t operation, reg_t address, reg_t data) override; + +private: + bool simple_match(unsigned xlen, reg_t value) const; + +public: + uint8_t type; + uint8_t maskmax; + bool select; + bool timing; + bool chain_bit; + match_t match; + bool m; + bool h; + bool s; + bool u; + bool execute_bit; + bool store_bit; + bool load_bit; + reg_t tdata2; + +}; + +class module_t { +public: + module_t(unsigned count); + ~module_t(); + + unsigned count() const { return triggers.size(); } + + match_result_t memory_access_match(action_t * const action, + operation_t operation, reg_t address, reg_t data); + + reg_t tdata1_read(const processor_t * 
const proc, unsigned index) const noexcept; + bool tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept; + reg_t tdata2_read(const processor_t * const proc, unsigned index) const noexcept; + bool tdata2_write(processor_t * const proc, unsigned index, const reg_t val) noexcept; + + processor_t *proc; +private: + std::vector triggers; +}; + +}; + +#endif diff --git a/vendor/riscv-isa-sim/scripts/config.guess b/vendor/riscv-isa-sim/scripts/config.guess new file mode 100644 index 00000000..699b3a10 --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/config.guess @@ -0,0 +1,1698 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2020 Free Software Foundation, Inc. + +timestamp='2020-11-19' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess +# +# Please send patches to . + + +me=$(echo "$0" | sed -e 's,.*/,,') + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2020 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. 
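+# (So, for instance, `CC_FOR_BUILD=cc ./config.guess` pins the helper
+# compiler; when none of CC_FOR_BUILD, HOST_CC and CC are set,
+# set_cc_for_build below probes for cc, gcc, c89 and c99 in turn.)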
+ +# Portable tmp directory creation inspired by the Autoconf team. + +tmp= +# shellcheck disable=SC2172 +trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 + +set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 + : "${TMPDIR=/tmp}" + # shellcheck disable=SC2039 + { tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } + dummy=$tmp/dummy + case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" + for driver in cc gcc c89 c99 ; do + if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD="$driver" + break + fi + done + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; + esac +} + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if test -f /.attbin/uname ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown +UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown +UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown +UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown + +case "$UNAME_SYSTEM" in +Linux|GNU|GNU/*) + LIBC=unknown + + set_cc_for_build + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #elif defined(__GLIBC__) + LIBC=gnu + #else + #include + /* First heuristic to detect musl libc. */ + #ifdef __DEFINED_va_list + LIBC=musl + #endif + #endif + EOF + eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')" + + # Second heuristic to detect musl libc. + if [ "$LIBC" = unknown ] && + command -v ldd >/dev/null && + ldd --version 2>&1 | grep -q ^musl; then + LIBC=musl + fi + + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + if [ "$LIBC" = unknown ]; then + LIBC=gnu + fi + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". 
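+	# For example, a NetBSD/amd64 9.2 host falls through the default
+	# branches below and is reported as x86_64-unknown-netbsd9.2.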
+ sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ + echo unknown)) + case "$UNAME_MACHINE_ARCH" in + aarch64eb) machine=aarch64_be-unknown ;; + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,') + endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p') + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "$UNAME_MACHINE_ARCH" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "$UNAME_MACHINE_ARCH" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr") + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "$UNAME_VERSION" in + Debian*) + release='-gnu' + ;; + *) + release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2) + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "$machine-${os}${release}${abi-}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//') + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//') + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//') + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" + exit ;; + *:ekkoBSD:*:*) + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" + exit ;; + *:SolidBSD:*:*) + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" + exit ;; + *:OS108:*:*) + echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE" + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:MirBSD:*:*) + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Twizzler:*:*) + echo "$UNAME_MACHINE"-unknown-twizzler + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox + exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}') + ;; + *5.*) + UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}') + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1) + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)" + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix"$UNAME_RELEASE" + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "$( (/bin/universe) 2>/dev/null)" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case $(/usr/bin/uname -p) in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux"$UNAME_RELEASE" + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
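The comment above describes a preprocessor probe that works with both Sun cc and gcc: feed a `#ifdef` guard through the compiler's preprocessor and grep for a marker word. As a generalized sketch (the helper name and fallback are assumptions for illustration; the script itself inlines the check, as the next hunk shows):

# Succeeds when the build compiler predefines the given macro.
# CC_FOR_BUILD is assumed to name a working compiler driver.
cc_defines() {
    (echo "#ifdef $1"; echo MACRO_IS_DEFINED; echo '#endif') |
        ${CC_FOR_BUILD:-cc} -E - 2>/dev/null | grep -q MACRO_IS_DEFINED
}
# Example: cc_defines __amd64 && echo "64-bit x86 target"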
+ if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + sun4*:SunOS:*:*) + case "$(/usr/bin/arch -k)" in + Series*|S4*) + UNAME_RELEASE=$(uname -v) + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')" + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos"$UNAME_RELEASE" + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null) + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 + case "$(/bin/arch)" in + sun3) + echo m68k-sun-sunos"$UNAME_RELEASE" + ;; + sun4) + echo sparc-sun-sunos"$UNAME_RELEASE" + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos"$UNAME_RELEASE" + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint"$UNAME_RELEASE" + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint"$UNAME_RELEASE" + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint"$UNAME_RELEASE" + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten"$UNAME_RELEASE" + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten"$UNAME_RELEASE" + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix"$UNAME_RELEASE" + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix"$UNAME_RELEASE" + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix"$UNAME_RELEASE" + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=$(echo "$UNAME_RELEASE" 
+ | sed -n 's/\([0-9]*\).*/\1/p') &&
+ SYSTEM_NAME=$("$dummy" "$dummyarg") &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos"$UNAME_RELEASE"
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=$(/usr/bin/uname -p)
+ if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110
+ then
+ if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \
+ test "$TARGET_BINARY_INTERFACE"x = x
+ then
+ echo m88k-dg-dgux"$UNAME_RELEASE"
+ else
+ echo m88k-dg-dguxbcs"$UNAME_RELEASE"
+ fi
+ else
+ echo i586-dg-dgux"$UNAME_RELEASE"
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')"
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'$(uname -s)'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if test -x /usr/bin/oslevel ; then
+ IBM_REV=$(/usr/bin/oslevel)
+ else
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
+ fi
+ echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV"
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy")
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }')
+ if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if test -x /usr/bin/lslpp ; then
+ IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc |
+ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/)
+ else
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
+ fi
+ echo "$IBM_ARCH"-ibm-aix"$IBM_REV"
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
+ case "$UNAME_MACHINE" in
+ 9000/31?)
HP_ARCH=m68000 ;;
+ 9000/[34]??) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if test -x /usr/bin/getconf; then
+ sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null)
+ sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null)
+ case "$sc_cpu_version" in
+ 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "$sc_kernel_bits" in
+ 32) HP_ARCH=hppa2.0n ;;
+ 64) HP_ARCH=hppa2.0w ;;
+ '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if test "$HP_ARCH" = ""; then
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy")
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if test "$HP_ARCH" = hppa2.0w
+ then
+ set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH=hppa2.0w
+ else
+ HP_ARCH=hppa64
+ fi
+ fi
+ echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
+ echo ia64-hp-hpux"$HPUX_REV"
+ exit ;;
+ 3050*:HI-UX:*:*)
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however.
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if test -x /usr/sbin/sysversion ; then + echo "$UNAME_MACHINE"-unknown-osf1mk + else + echo "$UNAME_MACHINE"-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz) + FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') + FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/') + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') + FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/') + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi"$UNAME_RELEASE" + exit ;; + *:BSD/OS:*:*) + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" + exit ;; + arm:FreeBSD:*:*) + UNAME_PROCESSOR=$(uname -p) + set_cc_for_build + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi + else + echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 
's/[-(].*//')"-gnueabihf + fi + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=$(/usr/bin/uname -p) + case "$UNAME_PROCESSOR" in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" + exit ;; + i*:CYGWIN*:*) + echo "$UNAME_MACHINE"-pc-cygwin + exit ;; + *:MINGW64*:*) + echo "$UNAME_MACHINE"-pc-mingw64 + exit ;; + *:MINGW*:*) + echo "$UNAME_MACHINE"-pc-mingw32 + exit ;; + *:MSYS*:*) + echo "$UNAME_MACHINE"-pc-msys + exit ;; + i*:PW*:*) + echo "$UNAME_MACHINE"-pc-pw32 + exit ;; + *:Interix*:*) + case "$UNAME_MACHINE" in + x86) + echo i586-pc-interix"$UNAME_RELEASE" + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix"$UNAME_RELEASE" + exit ;; + IA64) + echo ia64-unknown-interix"$UNAME_RELEASE" + exit ;; + esac ;; + i*:UWIN*:*) + echo "$UNAME_MACHINE"-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-pc-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + *:GNU:*:*) + # the GNU system + echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')" + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC" + exit ;; + *:Minix:*:*) + echo "$UNAME_MACHINE"-unknown-minix + exit ;; + aarch64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + alpha:Linux:*:*) + case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arm*:Linux:*:*) + set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi + else + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + cris:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + crisv32:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + frv:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + hexagon:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:Linux:*:*) + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" + exit ;; + ia64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m32r*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m68*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + set_cc_for_build + IS_GLIBC=0 + test x"${LIBC}" = xgnu && IS_GLIBC=1 + sed 's/^ //' << EOF > "$dummy.c" + #undef CPU + #undef mips + #undef mipsel + #undef mips64 + #undef mips64el + #if ${IS_GLIBC} && defined(_ABI64) + LIBCABI=gnuabi64 + #else + #if ${IS_GLIBC} && defined(_ABIN32) + LIBCABI=gnuabin32 + #else + LIBCABI=${LIBC} + #endif + #endif + + #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa64r6 + #else + #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa32r6 + #else + #if defined(__mips64) + CPU=mips64 + #else + CPU=mips + #endif + #endif + #endif + + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + MIPS_ENDIAN=el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + MIPS_ENDIAN= + #else + MIPS_ENDIAN= + #endif + #endif +EOF + eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')" + test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } + ;; + mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-"$LIBC" + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-"$LIBC" + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-"$LIBC" + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-"$LIBC" + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-"$LIBC" + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-"$LIBC" + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" 
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo "$UNAME_MACHINE"-ibm-linux-"$LIBC"
+ exit ;;
+ sh64*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ sh*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ tile*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ vax:Linux:*:*)
+ echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
+ exit ;;
+ x86_64:Linux:*:*)
+ set_cc_for_build
+ LIBCABI=$LIBC
+ if test "$CC_FOR_BUILD" != no_compiler_found; then
+ if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_X32 >/dev/null
+ then
+ LIBCABI="$LIBC"x32
+ fi
+ fi
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI"
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION"
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo "$UNAME_MACHINE"-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo "$UNAME_MACHINE"-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo "$UNAME_MACHINE"-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo "$UNAME_MACHINE"-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ i*86:*DOS:*:*)
+ echo "$UNAME_MACHINE"-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:*)
+ UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//')
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
+ else
+ echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL"
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case $(/bin/uname -X | grep "^Machine") in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}"
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=$(sed -n 's/.*Version //p' </usr/options/cb.name)
+ echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //'))
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL"
+ else
+ echo "$UNAME_MACHINE"-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configure will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos"$UNAME_RELEASE" + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos"$UNAME_RELEASE" + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv"$UNAME_RELEASE" + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=$( (uname -p) 2>/dev/null) + echo "$UNAME_MACHINE"-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo "$UNAME_MACHINE"-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux"$UNAME_RELEASE" + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if test -d /usr/nec; then + echo mips-nec-sysv"$UNAME_RELEASE" + else + echo mips-unknown-sysv"$UNAME_RELEASE" + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. 
+ echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. + echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux"$UNAME_RELEASE" + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux"$UNAME_RELEASE" + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux"$UNAME_RELEASE" + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody"$UNAME_RELEASE" + exit ;; + *:Rhapsody:*:*) + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" + exit ;; + arm64:Darwin:*:*) + echo aarch64-apple-darwin"$UNAME_RELEASE" + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=$(uname -p) + case $UNAME_PROCESSOR in + unknown) UNAME_PROCESSOR=powerpc ;; + esac + if command -v xcode-select > /dev/null 2> /dev/null && \ + ! xcode-select --print-path > /dev/null 2> /dev/null ; then + # Avoid executing cc if there is no toolchain installed as + # cc will be a stub that puts up a graphical alert + # prompting the user to install developer tools. + CC_FOR_BUILD=no_compiler_found + else + set_cc_for_build + fi + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # uname -m returns i386 or x86_64 + UNAME_PROCESSOR=$UNAME_MACHINE + fi + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=$(uname -p) + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ # shellcheck disable=SC2154
+ if test "$cputype" = 386; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo "$UNAME_MACHINE"-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux"$UNAME_RELEASE"
+ exit ;;
+ *:DragonFly:*:*)
+ echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=$( (uname -p) 2>/dev/null)
+ case "$UNAME_MACHINE" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')"
+ exit ;;
+ i*86:rdos:*:*)
+ echo "$UNAME_MACHINE"-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo "$UNAME_MACHINE"-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo "$UNAME_MACHINE"-unknown-esx
+ exit ;;
+ amd64:Isilon\ OneFS:*:*)
+ echo x86_64-unknown-onefs
+ exit ;;
+ *:Unleashed:*:*)
+ echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE"
+ exit ;;
+esac
+
+# No uname command or uname output not recognized.
+set_cc_for_build
+cat > "$dummy.c" <<EOF
+#ifdef _SEQUENT_
+#include <sys/types.h>
+#include <sys/utsname.h>
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#include <signal.h>
+#if defined(_SIZE_T_) || defined(SIGLOST)
+#include <sys/utsname.h>
+#endif
+#endif
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null);
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct?
*/
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+#endif
+
+#if defined (vax)
+#if !defined (ultrix)
+#include <sys/param.h>
+#if defined (BSD)
+#if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+#else
+#if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+#else
+ printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#endif
+#else
+ printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#else
+#if defined(_SIZE_T_) || defined(SIGLOST)
+ struct utsname un;
+ uname (&un);
+ printf ("vax-dec-ultrix%s\n", un.release); exit (0);
+#else
+ printf ("vax-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#if defined(_SIZE_T_) || defined(SIGLOST)
+ struct utsname *un;
+ uname (&un);
+ printf ("mips-dec-ultrix%s\n", un->release); exit (0);
+#else
+ printf ("mips-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; }
+
+echo "$0: unable to guess system type" >&2
+
+case "$UNAME_MACHINE:$UNAME_SYSTEM" in
+ mips:Linux | mips64:Linux)
+ # If we got here on MIPS GNU/Linux, output extra information.
+ cat >&2 <<EOF
+
+NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize
+the system type. Please install a C compiler and try again.
+EOF
+ ;;
+esac
+
+cat >&2 <<EOF
+
+This script (version $timestamp), has failed to recognize the
+operating system you are using. If your script is old, overwrite *all*
+copies of config.guess and config.sub with the latest versions from:
+
+  https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
+and
+  https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
+EOF
+
+year=$(echo $timestamp | sed 's,-.*,,')
+# shellcheck disable=SC2003
+if test "$(expr "$(date +%Y)" - "$year")" -lt 3 ; then
+ cat >&2 <<EOF
+
+If $0 has already been updated, send the following data and any
+information you think might be pertinent to config-patches@gnu.org to
+provide the necessary information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = $( (uname -m) 2>/dev/null || echo unknown)
+uname -r = $( (uname -r) 2>/dev/null || echo unknown)
+uname -s = $( (uname -s) 2>/dev/null || echo unknown)
+uname -v = $( (uname -v) 2>/dev/null || echo unknown)
+
+/usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null)
+/bin/uname -X = $( (/bin/uname -X) 2>/dev/null)
+
+hostinfo = $( (hostinfo) 2>/dev/null)
+/bin/universe = $( (/bin/universe) 2>/dev/null)
+/usr/bin/arch -k = $( (/usr/bin/arch -k) 2>/dev/null)
+/bin/arch = $( (/bin/arch) 2>/dev/null)
+/usr/bin/oslevel = $( (/usr/bin/oslevel) 2>/dev/null)
+/usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null)
+
+UNAME_MACHINE = "$UNAME_MACHINE"
+UNAME_RELEASE = "$UNAME_RELEASE"
+UNAME_SYSTEM = "$UNAME_SYSTEM"
+UNAME_VERSION = "$UNAME_VERSION"
+EOF
+fi
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/vendor/riscv-isa-sim/scripts/config.sub b/vendor/riscv-isa-sim/scripts/config.sub
new file mode 100644
index 00000000..19c9553b
--- /dev/null
+++ b/vendor/riscv-isa-sim/scripts/config.sub
@@ -0,0 +1,1854 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright 1992-2020 Free Software Foundation, Inc.
+
+timestamp='2020-12-02'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches to <config-patches@gnu.org>.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=$(echo "$0" | sed -e 's,.*/,,')
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
+
+Canonicalize a configuration name.
+
+Options:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2020 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo "$1" + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Split fields of configuration type +# shellcheck disable=SC2162 +IFS="-" read field1 field2 field3 field4 <&2 + exit 1 + ;; + *-*-*-*) + basic_machine=$field1-$field2 + basic_os=$field3-$field4 + ;; + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + basic_os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + basic_os=linux-android + ;; + *) + basic_machine=$field1-$field2 + basic_os=$field3 + ;; + esac + ;; + *-*) + # A lone config we happen to match not fitting any pattern + case $field1-$field2 in + decstation-3100) + basic_machine=mips-dec + basic_os= + ;; + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + basic_machine=$field1 + basic_os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* | 3100* \ + | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ + | ultra | tti* | harris | dolphin | highlevel | gould \ + | cbm | ns | masscomp | apple | axis | knuth | cray \ + | microblaze* | sim | cisco \ + | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + basic_os= + ;; + *) + basic_machine=$field1 + basic_os=$field2 + ;; + esac + ;; + esac + ;; + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. 
+ case $field1 in + 386bsd) + basic_machine=i386-pc + basic_os=bsd + ;; + a29khif) + basic_machine=a29k-amd + basic_os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + basic_os=scout + ;; + alliant) + basic_machine=fx80-alliant + basic_os= + ;; + altos | altos3068) + basic_machine=m68k-altos + basic_os= + ;; + am29k) + basic_machine=a29k-none + basic_os=bsd + ;; + amdahl) + basic_machine=580-amdahl + basic_os=sysv + ;; + amiga) + basic_machine=m68k-unknown + basic_os= + ;; + amigaos | amigados) + basic_machine=m68k-unknown + basic_os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + basic_os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + basic_os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + basic_os=bsd + ;; + aros) + basic_machine=i386-pc + basic_os=aros + ;; + aux) + basic_machine=m68k-apple + basic_os=aux + ;; + balance) + basic_machine=ns32k-sequent + basic_os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + basic_os=linux + ;; + cegcc) + basic_machine=arm-unknown + basic_os=cegcc + ;; + convex-c1) + basic_machine=c1-convex + basic_os=bsd + ;; + convex-c2) + basic_machine=c2-convex + basic_os=bsd + ;; + convex-c32) + basic_machine=c32-convex + basic_os=bsd + ;; + convex-c34) + basic_machine=c34-convex + basic_os=bsd + ;; + convex-c38) + basic_machine=c38-convex + basic_os=bsd + ;; + cray) + basic_machine=j90-cray + basic_os=unicos + ;; + crds | unos) + basic_machine=m68k-crds + basic_os= + ;; + da30) + basic_machine=m68k-da30 + basic_os= + ;; + decstation | pmax | pmin | dec3100 | decstatn) + basic_machine=mips-dec + basic_os= + ;; + delta88) + basic_machine=m88k-motorola + basic_os=sysv3 + ;; + dicos) + basic_machine=i686-pc + basic_os=dicos + ;; + djgpp) + basic_machine=i586-pc + basic_os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + basic_os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + basic_os=ose + ;; + gmicro) + basic_machine=tron-gmicro + basic_os=sysv + ;; + go32) + basic_machine=i386-pc + basic_os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + basic_os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + basic_os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + basic_os=hms + ;; + harris) + basic_machine=m88k-harris + basic_os=sysv3 + ;; + hp300 | hp300hpux) + basic_machine=m68k-hp + basic_os=hpux + ;; + hp300bsd) + basic_machine=m68k-hp + basic_os=bsd + ;; + hppaosf) + basic_machine=hppa1.1-hp + basic_os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + basic_os=proelf + ;; + i386mach) + basic_machine=i386-mach + basic_os=mach + ;; + isi68 | isi) + basic_machine=m68k-isi + basic_os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + basic_os=linux + ;; + magnum | m3230) + basic_machine=mips-mips + basic_os=sysv + ;; + merlin) + basic_machine=ns32k-utek + basic_os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + basic_os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + basic_os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + basic_os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + basic_os=coff + ;; + morphos) + basic_machine=powerpc-unknown + basic_os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + basic_os=moxiebox + ;; + msdos) + basic_machine=i386-pc + basic_os=msdos + ;; + msys) + basic_machine=i686-pc + basic_os=msys + ;; + mvs) + basic_machine=i370-ibm + basic_os=mvs + ;; + nacl) + basic_machine=le32-unknown + basic_os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + basic_os=sysv4 + ;; + netbsd386) + basic_machine=i386-pc + basic_os=netbsd + ;; + 
netwinder) + basic_machine=armv4l-rebel + basic_os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + basic_os=newsos + ;; + news1000) + basic_machine=m68030-sony + basic_os=newsos + ;; + necv70) + basic_machine=v70-nec + basic_os=sysv + ;; + nh3000) + basic_machine=m68k-harris + basic_os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + basic_os=cxux + ;; + nindy960) + basic_machine=i960-intel + basic_os=nindy + ;; + mon960) + basic_machine=i960-intel + basic_os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + basic_os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + basic_os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + basic_os=ose + ;; + os68k) + basic_machine=m68k-none + basic_os=os68k + ;; + paragon) + basic_machine=i860-intel + basic_os=osf + ;; + parisc) + basic_machine=hppa-unknown + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp + ;; + pw32) + basic_machine=i586-unknown + basic_os=pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + basic_os=rdos + ;; + rdos32) + basic_machine=i386-pc + basic_os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + basic_os=coff + ;; + sa29200) + basic_machine=a29k-amd + basic_os=udi + ;; + sei) + basic_machine=mips-sei + basic_os=seiux + ;; + sequent) + basic_machine=i386-sequent + basic_os= + ;; + sps7) + basic_machine=m68k-bull + basic_os=sysv2 + ;; + st2000) + basic_machine=m68k-tandem + basic_os= + ;; + stratus) + basic_machine=i860-stratus + basic_os=sysv4 + ;; + sun2) + basic_machine=m68000-sun + basic_os= + ;; + sun2os3) + basic_machine=m68000-sun + basic_os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + basic_os=sunos4 + ;; + sun3) + basic_machine=m68k-sun + basic_os= + ;; + sun3os3) + basic_machine=m68k-sun + basic_os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + basic_os=sunos4 + ;; + sun4) + basic_machine=sparc-sun + basic_os= + ;; + sun4os3) + basic_machine=sparc-sun + basic_os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + basic_os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + basic_os=solaris2 + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + basic_os= + ;; + sv1) + basic_machine=sv1-cray + basic_os=unicos + ;; + symmetry) + basic_machine=i386-sequent + basic_os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + basic_os=unicos + ;; + t90) + basic_machine=t90-cray + basic_os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + basic_os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + basic_os=tpf + ;; + udi29k) + basic_machine=a29k-amd + basic_os=udi + ;; + ultra3) + basic_machine=a29k-nyu + basic_os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + basic_os=none + ;; + vaxv) + basic_machine=vax-dec + basic_os=sysv + ;; + vms) + basic_machine=vax-dec + basic_os=vms + ;; + vsta) + basic_machine=i386-pc + basic_os=vsta + ;; + vxworks960) + basic_machine=i960-wrs + basic_os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + basic_os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + basic_os=vxworks + ;; + xbox) + basic_machine=i686-pc + basic_os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + basic_os=unicos + ;; + *) + basic_machine=$1 + basic_os= + ;; + esac + ;; +esac + +# Decode 1-component or ad-hoc basic machines +case $basic_machine in + # Here we handle the default manufacturer of certain CPU types. It is in + # some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + cpu=hppa1.1 + vendor=winbond + ;; + op50n) + cpu=hppa1.1 + vendor=oki + ;; + op60c) + cpu=hppa1.1 + vendor=oki + ;; + ibm*) + cpu=i370 + vendor=ibm + ;; + orion105) + cpu=clipper + vendor=highlevel + ;; + mac | mpw | mac-mpw) + cpu=m68k + vendor=apple + ;; + pmac | pmac-mpw) + cpu=powerpc + vendor=apple + ;; + + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + cpu=m68000 + vendor=att + ;; + 3b*) + cpu=we32k + vendor=att + ;; + bluegene*) + cpu=powerpc + vendor=ibm + basic_os=cnk + ;; + decsystem10* | dec10*) + cpu=pdp10 + vendor=dec + basic_os=tops10 + ;; + decsystem20* | dec20*) + cpu=pdp10 + vendor=dec + basic_os=tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + cpu=m68k + vendor=motorola + ;; + dpx2*) + cpu=m68k + vendor=bull + basic_os=sysv3 + ;; + encore | umax | mmax) + cpu=ns32k + vendor=encore + ;; + elxsi) + cpu=elxsi + vendor=elxsi + basic_os=${basic_os:-bsd} + ;; + fx2800) + cpu=i860 + vendor=alliant + ;; + genix) + cpu=ns32k + vendor=ns + ;; + h3050r* | hiux*) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + cpu=m68000 + vendor=hp + ;; + hp9k3[2-9][0-9]) + cpu=m68k + vendor=hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + i*86v32) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv32 + ;; + i*86v4*) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv4 + ;; + i*86v) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv + ;; + i*86sol2) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=solaris2 + ;; + j90 | j90-cray) + cpu=j90 + vendor=cray + basic_os=${basic_os:-unicos} + ;; + iris | iris4d) + cpu=mips + vendor=sgi + case $basic_os in + irix*) + ;; + *) + basic_os=irix4 + ;; + esac + ;; + miniframe) + cpu=m68000 + vendor=convergent + ;; + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) + cpu=m68k + vendor=atari + basic_os=mint + ;; + news-3600 | risc-news) + cpu=mips + vendor=sony + basic_os=newsos + ;; + next | m*-next) + cpu=m68k + vendor=next + case $basic_os in + openstep*) + ;; + nextstep*) + ;; + ns2*) + basic_os=nextstep2 + ;; + *) + basic_os=nextstep3 + ;; + esac + ;; + np1) + cpu=np1 + vendor=gould + ;; + op50n-* | op60c-*) + cpu=hppa1.1 + vendor=oki + basic_os=proelf + ;; + pa-hitachi) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + pbd) + cpu=sparc + vendor=tti + ;; + pbb) + cpu=m68k + vendor=tti + ;; + pc532) + cpu=ns32k + vendor=pc532 + ;; + pn) + cpu=pn + vendor=gould + ;; + power) + cpu=power + vendor=ibm + ;; + ps2) + cpu=i386 + vendor=ibm + ;; + rm[46]00) + cpu=mips + vendor=siemens + ;; + rtpc | rtpc-*) + cpu=romp + vendor=ibm + ;; + sde) + cpu=mipsisa32 + vendor=sde + basic_os=${basic_os:-elf} + ;; + simso-wrs) + cpu=sparclite + vendor=wrs + basic_os=vxworks + ;; + tower | tower-32) + cpu=m68k + vendor=ncr + ;; + vpp*|vx|vx-*) + cpu=f301 
+ vendor=fujitsu
+ ;;
+ w65)
+ cpu=w65
+ vendor=wdc
+ ;;
+ w89k-*)
+ cpu=hppa1.1
+ vendor=winbond
+ basic_os=proelf
+ ;;
+ none)
+ cpu=none
+ vendor=none
+ ;;
+ leon|leon[3-9])
+ cpu=sparc
+ vendor=$basic_machine
+ ;;
+ leon-*|leon[3-9]-*)
+ cpu=sparc
+ vendor=$(echo "$basic_machine" | sed 's/-.*//')
+ ;;
+
+ *-*)
+ # shellcheck disable=SC2162
+ IFS="-" read cpu vendor <<EOF
+$basic_machine
+EOF
+ ;;
+ *)
+ cpu=$basic_machine
+ vendor=unknown
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $vendor in
+ digital*)
+ vendor=dec
+ ;;
+ commodore*)
+ vendor=cbm
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if test x$basic_os != x
+then
+
+# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just
+# set os.
+case $basic_os in
+ gnu/linux*)
+ kernel=linux
+ os=$(echo $basic_os | sed -e 's|gnu/linux|gnu|')
+ ;;
+ os2-emx)
+ kernel=os2
+ os=$(echo $basic_os | sed -e 's|os2-emx|emx|')
+ ;;
+ nto-qnx*)
+ kernel=nto
+ os=$(echo $basic_os | sed -e 's|nto-qnx|qnx|')
+ ;;
+ *-*)
+ # shellcheck disable=SC2162
+ IFS="-" read kernel os <<EOF
+$basic_os
+EOF
+ ;;
+ *)
+ kernel=
+ os=$basic_os
+ ;;
+esac
+
+fi
+
+# As a final step for OS-related things, validate the OS-kernel combination
+# (given a valid OS), if there is a kernel.
+case $kernel-$os in
+ linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* )
+ ;;
+ uclinux-uclibc* )
+ ;;
+ -dietlibc* | -newlib* | -musl* | -uclibc* )
+ # These are just libc implementations, not actual OSes, and thus
+ # require a kernel.
+ echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2
+ exit 1
+ ;;
+ kfreebsd*-gnu* | kopensolaris*-gnu*)
+ ;;
+ nto-qnx*)
+ ;;
+ os2-emx)
+ ;;
+ *-eabi* | *-gnueabi*)
+ ;;
+ -*)
+ # Blank kernel with real OS is always fine.
+ ;;
+ *-*)
+ echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+case $vendor in
+ unknown)
+ case $cpu-$os in
+ *-riscix*)
+ vendor=acorn
+ ;;
+ *-sunos*)
+ vendor=sun
+ ;;
+ *-cnk* | *-aix*)
+ vendor=ibm
+ ;;
+ *-beos*)
+ vendor=be
+ ;;
+ *-hpux*)
+ vendor=hp
+ ;;
+ *-mpeix*)
+ vendor=hp
+ ;;
+ *-hiux*)
+ vendor=hitachi
+ ;;
+ *-unos*)
+ vendor=crds
+ ;;
+ *-dgux*)
+ vendor=dg
+ ;;
+ *-luna*)
+ vendor=omron
+ ;;
+ *-genix*)
+ vendor=ns
+ ;;
+ *-clix*)
+ vendor=intergraph
+ ;;
+ *-mvs* | *-opened*)
+ vendor=ibm
+ ;;
+ *-os400*)
+ vendor=ibm
+ ;;
+ s390-* | s390x-*)
+ vendor=ibm
+ ;;
+ *-ptx*)
+ vendor=sequent
+ ;;
+ *-tpf*)
+ vendor=ibm
+ ;;
+ *-vxsim* | *-vxworks* | *-windiss*)
+ vendor=wrs
+ ;;
+ *-aux*)
+ vendor=apple
+ ;;
+ *-hms*)
+ vendor=hitachi
+ ;;
+ *-mpw* | *-macos*)
+ vendor=apple
+ ;;
+ *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*)
+ vendor=atari
+ ;;
+ *-vos*)
+ vendor=stratus
+ ;;
+ esac
+ ;;
+esac
+
+echo "$cpu-$vendor-${kernel:+$kernel-}$os"
+exit
+
+# Local variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/vendor/riscv-isa-sim/scripts/install.sh b/vendor/riscv-isa-sim/scripts/install.sh
new file mode 100755
index 00000000..89fc9b09
--- /dev/null
+++ b/vendor/riscv-isa-sim/scripts/install.sh
@@ -0,0 +1,238 @@
+#! /bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+#
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transformarg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+    case $1 in
+	-c) instcmd="$cpprog"
+	    shift
+	    continue;;
+
+	-d) dir_arg=true
+	    shift
+	    continue;;
+
+	-m) chmodcmd="$chmodprog $2"
+	    shift
+	    shift
+	    continue;;
+
+	-o) chowncmd="$chownprog $2"
+	    shift
+	    shift
+	    continue;;
+
+	-g) chgrpcmd="$chgrpprog $2"
+	    shift
+	    shift
+	    continue;;
+
+	-s) stripcmd="$stripprog"
+	    shift
+	    continue;;
+
+	-t=*) transformarg=`echo $1 | sed 's/-t=//'`
+	    shift
+	    continue;;
+
+	-b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+	    shift
+	    continue;;
+
+	*)  if [ x"$src" = x ]
+	    then
+		src=$1
+	    else
+		# this colon is to work around a 386BSD /bin/sh bug
+		:
+		dst=$1
+	    fi
+	    shift
+	    continue;;
+    esac
+done
+
+if [ x"$src" = x ]
+then
+	echo "install: no input file specified"
+	exit 1
+else
+	true
+fi
+
+if [ x"$dir_arg" != x ]; then
+	dst=$src
+	src=""
+
+	if [ -d $dst ]; then
+		instcmd=:
+	else
+		instcmd=mkdir
+	fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+	if [ -f $src -o -d $src ]
+	then
+		true
+	else
+		echo "install: $src does not exist"
+		exit 1
+	fi
+
+	if [ x"$dst" = x ]
+	then
+		echo "install: no destination specified"
+		exit 1
+	else
+		true
+	fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+	if [ -d $dst ]
+	then
+		dst="$dst"/`basename $src`
+	else
+		true
+	fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+'
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+	pathcomp="${pathcomp}${1}"
+	shift
+
+	if [ ! -d "${pathcomp}" ] ;
-d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. + + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/vendor/riscv-isa-sim/scripts/mk-install-dirs.sh b/vendor/riscv-isa-sim/scripts/mk-install-dirs.sh new file mode 100755 index 00000000..644b5f72 --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/mk-install-dirs.sh @@ -0,0 +1,40 @@ +#! /bin/sh +# mkinstalldirs --- make directory hierarchy +# Author: Noah Friedman +# Created: 1993-05-16 +# Public domain + +# $Id: mkinstalldirs,v 1.1 2003/09/09 22:24:03 mhampton Exp $ + +errstatus=0 + +for file +do + set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` + shift + + pathcomp= + for d + do + pathcomp="$pathcomp$d" + case "$pathcomp" in + -* ) pathcomp=./$pathcomp ;; + esac + + if test ! -d "$pathcomp"; then + echo "mkdir $pathcomp" 1>&2 + + mkdir "$pathcomp" || lasterr=$? + + if test ! -d "$pathcomp"; then + errstatus=$lasterr + fi + fi + + pathcomp="$pathcomp/" + done +done + +exit $errstatus + +# mkinstalldirs ends here diff --git a/vendor/riscv-isa-sim/scripts/vcs-version.sh b/vendor/riscv-isa-sim/scripts/vcs-version.sh new file mode 100755 index 00000000..692c071e --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/vcs-version.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash +#========================================================================= +# vcs-version.sh [options] [src-dir] +#========================================================================= +# +# -h Display this message +# -v Verbose mode +# +# This script will create a version string by querying a version control +# system. The string is appropriate for use in installations and +# distributions. Currently this script assumes we are using git as our +# version control system but it would be possible to check and see if we +# are using an alternative version control system and create a version +# string appropriately. 
+#
+# The script uses git describe plus a few other git commands to create
+# version strings in the following format:
+#
+#  X.Y[-Z-gN][-dirty]
+#
+# where X is the major release, Y is the minor release, Z is the number
+# of commits since the X.Y release, N is an eight digit abbreviated SHA
+# hash of the most recent commit, and the dirty suffix is appended when
+# the working directory used to create the installation or distribution
+# is not a pristine checkout. Here are some example version strings:
+#
+#  0.0                     : initial import
+#  0.0-3-g99ef6933         : 3rd commit since initial import (N=99ef6933)
+#  1.0                     : release 1.0
+#  1.1-12-g3487ab12        : 12th commit since release 1.1 (N=3487ab12)
+#  1.1-12-g3487ab12-dirty  : 12th commit since release 1.1 (N=3487ab12)
+#
+# The last example is from a dirty working directory. To find the last
+# release, the script looks for the last tag (does not need to be an
+# annotated tag, but probably should be) which matches the format rel-*.
+# If there is no such tag in the history, then the script uses 0.0 as
+# the release number and counts the total number of commits since the
+# original import for the commit count.
+#
+# If the current directory is not within the working directory, then the
+# path to the source directory should be supplied on the command line.
+#
+# Author : Christopher Batten
+# Date   : August 5, 2009
+
+set -e
+
+#-------------------------------------------------------------------------
+# Command line parsing
+#-------------------------------------------------------------------------
+
+if ( test "$1" = "-h" ); then
+  echo ""
+  sed -n '3p' $0 | sed -e 's/#//'
+  sed -n '5,/^$/p' $0 | sed -e 's/#//'
+  exit 1
+fi
+
+# Source directory command line option
+
+src_dir="."
+if ( test -n "$1" ); then
+  src_dir="$1"
+fi
+
+#-------------------------------------------------------------------------
+# Verify source directory
+#-------------------------------------------------------------------------
+# If the source directory is not a git working directory output a
+# question mark. A distribution will not be in a working directory, but
+# the build system should be structured such that this script is not
+# executed (and instead the version information should probably come
+# from configure). If the user does not specify a source directory use
+# the current directory.
+
+if !( git rev-parse --is-inside-work-tree &> /dev/null ); then
+  echo "?"
+  exit 1;
+fi
+
+top_dir=`git rev-parse --show-cdup`
+cd ./${top_dir}
+
+#-------------------------------------------------------------------------
+# Create the version string
+#-------------------------------------------------------------------------
+# See if we can do a describe based on a tag and if not use a default
+# release number of 0.0 so that we always get a canonical version number
+
+if ( git describe --tags --match "rel-*" &> /dev/null ); then
+  ver_str=`git describe --tags --match "rel-*" | sed 's/rel-//'`
+else
+  ver_num="0.0"
+  ver_commits=`git rev-list --all | wc -l | tr -d " "`
+  ver_sha=`git describe --tags --match "rel-*" --always`
+  ver_str="${ver_num}-${ver_commits}-g${ver_sha}"
+fi
+
+# Add a dirty suffix if working directory is dirty
+
+if !( git diff --quiet ); then
+  ver_str="${ver_str}-dirty"
+else
+  untracked=`git ls-files --directory --exclude-standard --others -t`
+  if ( test -n "${untracked}" ); then
+    ver_str="${ver_str}-dirty"
+  fi
+fi
+
+# Output the final version string
+
+echo "${ver_str}"
+
+# Final exit status
+
+exit 0;
+
diff --git a/vendor/riscv-isa-sim/softfloat/f128_add.c b/vendor/riscv-isa-sim/softfloat/f128_add.c
new file mode 100644
index 00000000..6568ab6f
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f128_add.c
@@ -0,0 +1,78 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float128_t f128_add( float128_t a, float128_t b )
+{
+    union ui128_f128 uA;
+    uint_fast64_t uiA64, uiA0;
+    bool signA;
+    union ui128_f128 uB;
+    uint_fast64_t uiB64, uiB0;
+    bool signB;
+#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2)
+    float128_t
+        (*magsFuncPtr)(
+            uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool );
+#endif
+
+    uA.f = a;
+    uiA64 = uA.ui.v64;
+    uiA0 = uA.ui.v0;
+    signA = signF128UI64( uiA64 );
+    uB.f = b;
+    uiB64 = uB.ui.v64;
+    uiB0 = uB.ui.v0;
+    signB = signF128UI64( uiB64 );
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+    if ( signA == signB ) {
+        return softfloat_addMagsF128( uiA64, uiA0, uiB64, uiB0, signA );
+    } else {
+        return softfloat_subMagsF128( uiA64, uiA0, uiB64, uiB0, signA );
+    }
+#else
+    magsFuncPtr =
+        (signA == signB) ? softfloat_addMagsF128 : softfloat_subMagsF128;
+    return (*magsFuncPtr)( uiA64, uiA0, uiB64, uiB0, signA );
+#endif
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f128_classify.c b/vendor/riscv-isa-sim/softfloat/f128_classify.c
new file mode 100755
index 00000000..1092a9b5
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f128_classify.c
@@ -0,0 +1,37 @@
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast16_t f128_classify( float128_t a )
+{
+    union ui128_f128 uA;
+    uint_fast64_t uiA64, uiA0;
+
+    uA.f = a;
+    uiA64 = uA.ui.v64;
+    uiA0 = uA.ui.v0;
+
+    uint_fast16_t infOrNaN = expF128UI64( uiA64 ) == 0x7FFF;
+    uint_fast16_t subnormalOrZero = expF128UI64( uiA64 ) == 0;
+    bool sign = signF128UI64( uiA64 );
+    bool fracZero = fracF128UI64( uiA64 ) == 0 && uiA0 == 0;
+    bool isNaN = isNaNF128UI( uiA64, uiA0 );
+    bool isSNaN = softfloat_isSigNaNF128UI( uiA64, uiA0 );
+
+    return
+        (  sign && infOrNaN && fracZero )          << 0 |
+        (  sign && !infOrNaN && !subnormalOrZero ) << 1 |
+        (  sign && subnormalOrZero && !fracZero )  << 2 |
+        (  sign && subnormalOrZero && fracZero )   << 3 |
+        ( !sign && infOrNaN && fracZero )          << 7 |
+        ( !sign && !infOrNaN && !subnormalOrZero ) << 6 |
+        ( !sign && subnormalOrZero && !fracZero )  << 5 |
+        ( !sign && subnormalOrZero && fracZero )   << 4 |
+        ( isNaN &&  isSNaN )                       << 8 |
+        ( isNaN && !isSNaN )                       << 9;
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f128_div.c b/vendor/riscv-isa-sim/softfloat/f128_div.c
new file mode 100644
index 00000000..9384e756
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f128_div.c
@@ -0,0 +1,199 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
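
As an aside on f128_classify above: it returns a one-hot ten-bit class mask whose layout matches the RISC-V fclass result. A decoder for that mask might look like the sketch below (an editorial illustration, not part of the vendored sources; the helper name is invented):

    /* Illustrative sketch only -- decodes the one-hot class mask built by
       f128_classify; the bit positions are read off the shifts in that file. */
    #include <stdint.h>
    #include <stdio.h>

    static const char *f128_class_name(uint_fast16_t mask)
    {
        switch (mask) {
        case 1u << 0: return "negative infinity";
        case 1u << 1: return "negative normal";
        case 1u << 2: return "negative subnormal";
        case 1u << 3: return "negative zero";
        case 1u << 4: return "positive zero";
        case 1u << 5: return "positive subnormal";
        case 1u << 6: return "positive normal";
        case 1u << 7: return "positive infinity";
        case 1u << 8: return "signaling NaN";
        case 1u << 9: return "quiet NaN";
        default:      return "not a one-hot class mask";
        }
    }

    int main(void)
    {
        printf("%s\n", f128_class_name(1u << 9)); /* prints "quiet NaN" */
        return 0;
    }
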
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_div( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; + int_fast32_t expB; + struct uint128 sigB; + bool signZ; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + struct uint128 rem; + uint_fast32_t recip32; + int ix; + uint_fast64_t q64; + uint_fast32_t q; + struct uint128 term; + uint_fast32_t qs[3]; + uint_fast64_t sigZExtra; + struct uint128 sigZ, uiZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) goto propagateNaN; + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! (sigB.v64 | sigB.v0) ) { + if ( ! (expA | sigA.v64 | sigA.v0) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
(sigA.v64 | sigA.v0) ) goto zero; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x3FFE; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + rem = sigA; + if ( softfloat_lt128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ) ) { + --expZ; + rem = softfloat_add128( sigA.v64, sigA.v0, sigA.v64, sigA.v0 ); + } + recip32 = softfloat_approxRecip32_1( sigB.v64>>17 ); + ix = 3; + for (;;) { + q64 = (uint_fast64_t) (uint32_t) (rem.v64>>19) * recip32; + q = (q64 + 0x80000000)>>32; + --ix; + if ( ix < 0 ) break; + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + --q; + rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + qs[ix] = q; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ((q + 1) & 7) < 2 ) { + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + --q; + rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } else if ( softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 ) ) { + ++q; + rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + if ( rem.v64 | rem.v0 ) q |= 1; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZExtra = (uint64_t) ((uint_fast64_t) q<<60); + term = softfloat_shortShiftLeft128( 0, qs[1], 54 ); + sigZ = + softfloat_add128( + (uint_fast64_t) qs[2]<<19, ((uint_fast64_t) qs[0]<<25) + (q>>4), + term.v64, term.v0 + ); + return + softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + goto uiZ0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ.v64 = packToF128UI64( signZ, 0, 0 ); + uiZ0: + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_eq.c b/vendor/riscv-isa-sim/softfloat/f128_eq.c new file mode 100644 index 00000000..a0e1ad28 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_eq.c @@ -0,0 +1,73 @@ + 
+/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_eq( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return + (uiA0 == uiB0) + && ( (uiA64 == uiB64) + || (! uiA0 && ! ((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c new file mode 100644 index 00000000..bd37b979 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f128_eq_signaling( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return + (uiA0 == uiB0) + && ( (uiA64 == uiB64) + || (! uiA0 && ! ((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c new file mode 100644 index 00000000..fced58e5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
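
A note on the equality tests above (f128_eq and f128_eq_signaling): once NaNs are screened out, the final clause makes +0 and -0 compare equal even though their bit patterns differ in the sign bit. A self-contained sketch of just that clause (hypothetical helper, not vendored code; NaN screening deliberately omitted):

    /* Illustrative sketch only -- mirrors the return expression of f128_eq:
       two non-NaN float128 bit patterns are equal iff all 128 bits match,
       or both operands are zeros of either sign. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool f128_bits_eq(uint64_t a64, uint64_t a0,
                             uint64_t b64, uint64_t b0)
    {
        return (a0 == b0)
            && ((a64 == b64)
                || (a0 == 0
                    && ((a64 | b64) & UINT64_C(0x7FFFFFFFFFFFFFFF)) == 0));
    }

    int main(void)
    {
        uint64_t neg0 = UINT64_C(0x8000000000000000);
        assert(f128_bits_eq(0, 0, neg0, 0));   /* +0 == -0                 */
        assert(!f128_bits_eq(0, 1, neg0, 0));  /* low words differ: unequal */
        return 0;
    }
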
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_isSignalingNaN( float128_t a ) +{ + union ui128_f128 uA; + + uA.f = a; + return softfloat_isSigNaNF128UI( uA.ui.v64, uA.ui.v0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_le.c b/vendor/riscv-isa-sim/softfloat/f128_le.c new file mode 100644 index 00000000..9b0aa234 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_le.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f128_le( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + || ! 
(((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 == uiB64) && (uiA0 == uiB0)) + || (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f128_le_quiet.c new file mode 100644 index 00000000..3b440388 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_le_quiet.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_le_quiet( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + || ! 
(((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 == uiB64) && (uiA0 == uiB0)) + || (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_lt.c b/vendor/riscv-isa-sim/softfloat/f128_lt.c new file mode 100644 index 00000000..a28f95b7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_lt.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f128_lt( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + && (((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 != uiB64) || (uiA0 != uiB0)) + && (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c new file mode 100644 index 00000000..20146ee4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. 
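
The ordering predicates above (f128_le, f128_lt and their quiet variants) compare the sign/magnitude encoding directly: when the signs differ, the negative operand is smaller unless both operands are zeros; when the signs agree, an unsigned comparison of the magnitude bits is used, inverted for negative values. The same idea on a single 64-bit word (illustrative sketch only; sign_mag_lt is an invented name, and NaN screening is omitted):

    /* Illustrative sketch only -- restates the return expression of f128_lt
       for one sign/magnitude word instead of a 128-bit pair. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool sign_mag_lt(uint64_t a, uint64_t b)
    {
        bool signA = a >> 63, signB = b >> 63;
        uint64_t magMask = UINT64_C(0x7FFFFFFFFFFFFFFF);
        if (signA != signB)
            return signA && ((a | b) & magMask) != 0; /* not both zeros */
        return (a != b) && (signA ^ (a < b));         /* flip if negative */
    }

    int main(void)
    {
        uint64_t neg0 = UINT64_C(0x8000000000000000);
        assert(!sign_mag_lt(neg0, 0));           /* -0 < +0 is false        */
        assert(sign_mag_lt(neg0 | 2, neg0 | 1)); /* larger magnitude, both  */
                                                 /* negative: strictly less */
        return 0;
    }
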
+All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_lt_quiet( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + && (((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 != uiB64) || (uiA0 != uiB0)) + && (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_mul.c b/vendor/riscv-isa-sim/softfloat/f128_mul.c new file mode 100644 index 00000000..18716139 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_mul.c @@ -0,0 +1,163 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_mul( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; + int_fast32_t expB; + struct uint128 sigB; + bool signZ; + uint_fast64_t magBits; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + uint64_t sig256Z[4]; + uint_fast64_t sigZExtra; + struct uint128 sigZ; + struct uint128_extra sig128Extra; + struct uint128 uiZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( + (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) + ) { + goto propagateNaN; + } + magBits = expB | sigB.v64 | sigB.v0; + goto infArg; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + magBits = expA | sigA.v64 | sigA.v0; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) goto zero; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! 
(sigB.v64 | sigB.v0) ) goto zero; + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x4000; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 16 ); + softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z ); + sigZExtra = sig256Z[indexWord( 4, 1 )] | (sig256Z[indexWord( 4, 0 )] != 0); + sigZ = + softfloat_add128( + sig256Z[indexWord( 4, 3 )], sig256Z[indexWord( 4, 2 )], + sigA.v64, sigA.v0 + ); + if ( UINT64_C( 0x0002000000000000 ) <= sigZ.v64 ) { + ++expZ; + sig128Extra = + softfloat_shortShiftRightJam128Extra( + sigZ.v64, sigZ.v0, sigZExtra, 1 ); + sigZ = sig128Extra.v; + sigZExtra = sig128Extra.extra; + } + return + softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + goto uiZ; + } + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + goto uiZ0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ.v64 = packToF128UI64( signZ, 0, 0 ); + uiZ0: + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f128_mulAdd.c new file mode 100644 index 00000000..b2e2142f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_mulAdd.c @@ -0,0 +1,63 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
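
One detail worth flagging in f128_mul above: the low half of the 256-bit product is not thrown away; it is OR-folded into sigZExtra so the rounding step can still see that the result is inexact. This "jam" (sticky-bit) idiom appears throughout SoftFloat, e.g. in softfloat_shortShiftRightJam128Extra. A minimal sketch of the idiom on one 64-bit word (illustrative only; the helper name is invented, and dist must be in 1..63):

    /* Illustrative sketch only -- bits shifted out are OR-reduced into the
       lowest surviving bit, preserving the "result was inexact" signal. */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t shift_right_jam64(uint64_t x, unsigned dist)
    {
        return (x >> dist) | ((x & ((UINT64_C(1) << dist) - 1)) != 0);
    }

    int main(void)
    {
        assert(shift_right_jam64(0x120, 4) == 0x12); /* exact: no jam bit  */
        assert(shift_right_jam64(0x121, 4) == 0x13); /* inexact: sticky set */
        return 0;
    }
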
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t f128_mulAdd( float128_t a, float128_t b, float128_t c ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + union ui128_f128 uC; + uint_fast64_t uiC64, uiC0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + uC.f = c; + uiC64 = uC.ui.v64; + uiC0 = uC.ui.v0; + return softfloat_mulAddF128( uiA64, uiA0, uiB64, uiB0, uiC64, uiC0, 0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_rem.c b/vendor/riscv-isa-sim/softfloat/f128_rem.c new file mode 100644 index 00000000..555d71eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_rem.c @@ -0,0 +1,190 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_rem( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + int_fast32_t expB; + struct uint128 sigB; + struct exp32_sig128 normExpSig; + struct uint128 rem; + int_fast32_t expDiff; + uint_fast32_t q, recip32; + uint_fast64_t q64; + struct uint128 term, altRem, meanRem; + bool signRem; + struct uint128 uiZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( + (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) + ) { + goto propagateNaN; + } + goto invalid; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! (sigB.v64 | sigB.v0) ) goto invalid; + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) return a; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + rem = sigA; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + if ( expDiff ) { + --expB; + sigB = softfloat_add128( sigB.v64, sigB.v0, sigB.v64, sigB.v0 ); + q = 0; + } else { + q = softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 ); + if ( q ) { + rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + } + } else { + recip32 = softfloat_approxRecip32_1( sigB.v64>>17 ); + expDiff -= 30; + for (;;) { + q64 = (uint_fast64_t) (uint32_t) (rem.v64>>19) * recip32; + if ( expDiff < 0 ) break; + q = (q64 + 0x80000000)>>32; + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -29 here.) 
+ *--------------------------------------------------------------------*/ + q = (uint32_t) (q64>>32)>>(~expDiff & 31); + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, expDiff + 30 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + altRem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + goto selectRem; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } while ( ! (rem.v64 & UINT64_C( 0x8000000000000000 )) ); + selectRem: + meanRem = softfloat_add128( rem.v64, rem.v0, altRem.v64, altRem.v0 ); + if ( + (meanRem.v64 & UINT64_C( 0x8000000000000000 )) + || (! (meanRem.v64 | meanRem.v0) && (q & 1)) + ) { + rem = altRem; + } + signRem = signA; + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + signRem = ! signRem; + rem = softfloat_sub128( 0, 0, rem.v64, rem.v0 ); + } + return softfloat_normRoundPackToF128( signRem, expB - 1, rem.v64, rem.v0 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f128_roundToInt.c new file mode 100644 index 00000000..0f1f07ec --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_roundToInt.c @@ -0,0 +1,160 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t + f128_roundToInt( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + int_fast32_t exp; + struct uint128 uiZ; + uint_fast64_t lastBitMask, roundBitsMask; + bool roundNearEven; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + exp = expF128UI64( uiA64 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x402F <= exp ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( 0x406F <= exp ) { + if ( (exp == 0x7FFF) && (fracF128UI64( uiA64 ) | uiA0) ) { + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, 0, 0 ); + goto uiZ; + } + return a; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + lastBitMask = (uint_fast64_t) 2<<(0x406E - exp); + roundBitsMask = lastBitMask - 1; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + roundNearEven = (roundingMode == softfloat_round_near_even); + if ( roundNearEven || (roundingMode == softfloat_round_near_maxMag) ) { + if ( exp == 0x402F ) { + if ( UINT64_C( 0x8000000000000000 ) <= uiZ.v0 ) { + ++uiZ.v64; + if ( + roundNearEven + && (uiZ.v0 == UINT64_C( 0x8000000000000000 )) + ) { + uiZ.v64 &= ~1; + } + } + } else { + uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, lastBitMask>>1 ); + if ( roundNearEven && ! (uiZ.v0 & roundBitsMask) ) { + uiZ.v0 &= ~lastBitMask; + } + } + } else if ( + roundingMode + == (signF128UI64( uiZ.v64 ) ? softfloat_round_min + : softfloat_round_max) + ) { + uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, roundBitsMask ); + } + uiZ.v0 &= ~roundBitsMask; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( exp < 0x3FFF ) { + if ( ! ((uiA64 & UINT64_C( 0x7FFFFFFFFFFFFFFF )) | uiA0) ) { + return a; + } + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ.v64 = uiA64 & packToF128UI64( 1, 0, 0 ); + uiZ.v0 = 0; + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! (fracF128UI64( uiA64 ) | uiA0) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0x3FFE ) uiZ.v64 |= packToF128UI64( 0, 0x3FFF, 0 ); + break; + case softfloat_round_min: + if ( uiZ.v64 ) uiZ.v64 = packToF128UI64( 1, 0x3FFF, 0 ); + break; + case softfloat_round_max: + if ( ! 
uiZ.v64 ) uiZ.v64 = packToF128UI64( 0, 0x3FFF, 0 ); + break; + } + goto uiZ; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + uiZ.v64 = uiA64; + uiZ.v0 = 0; + lastBitMask = (uint_fast64_t) 1<<(0x402F - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ.v64 += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ.v64 += lastBitMask>>1; + if ( ! ((uiZ.v64 & roundBitsMask) | uiA0) ) { + uiZ.v64 &= ~lastBitMask; + } + } else if ( + roundingMode + == (signF128UI64( uiZ.v64 ) ? softfloat_round_min + : softfloat_round_max) + ) { + uiZ.v64 = (uiZ.v64 | (uiA0 != 0)) + roundBitsMask; + } + uiZ.v64 &= ~roundBitsMask; + } + if ( exact && ((uiZ.v64 != uiA64) || (uiZ.v0 != uiA0)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_sqrt.c b/vendor/riscv-isa-sim/softfloat/f128_sqrt.c new file mode 100644 index 00000000..5b99694e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_sqrt.c @@ -0,0 +1,201 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_sqrt( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA, uiZ; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + uint_fast32_t sig32A, recipSqrt32, sig32Z; + struct uint128 rem; + uint32_t qs[3]; + uint_fast32_t q; + uint_fast64_t x64, sig64Z; + struct uint128 y, term; + uint_fast64_t sigZExtra; + struct uint128 sigZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) { + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, 0, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA.v64 | sigA.v0) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) return a; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FFF)>>1) + 0x3FFE; + expA &= 1; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sig32A = sigA.v64>>17; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sig32Z >>= 1; + rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 12 ); + } else { + rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 13 ); + } + qs[2] = sig32Z; + rem.v64 -= (uint_fast64_t) sig32Z * sig32Z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + q = ((uint32_t) (rem.v64>>2) * (uint_fast64_t) recipSqrt32)>>32; + x64 = (uint_fast64_t) sig32Z<<32; + sig64Z = x64 + ((uint_fast64_t) q<<3); + y = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + /*------------------------------------------------------------------------ + | (Repeating this loop is a rare occurrence.) + *------------------------------------------------------------------------*/ + for (;;) { + term = softfloat_mul64ByShifted32To128( x64 + sig64Z, q ); + rem = softfloat_sub128( y.v64, y.v0, term.v64, term.v0 ); + if ( ! 
(rem.v64 & UINT64_C( 0x8000000000000000 )) ) break; + --q; + sig64Z -= 1<<3; + } + qs[1] = q; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + q = ((rem.v64>>2) * recipSqrt32)>>32; + y = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + sig64Z <<= 1; + /*------------------------------------------------------------------------ + | (Repeating this loop is a rare occurrence.) + *------------------------------------------------------------------------*/ + for (;;) { + term = softfloat_shortShiftLeft128( 0, sig64Z, 32 ); + term = softfloat_add128( term.v64, term.v0, 0, (uint_fast64_t) q<<6 ); + term = softfloat_mul128By32( term.v64, term.v0, q ); + rem = softfloat_sub128( y.v64, y.v0, term.v64, term.v0 ); + if ( ! (rem.v64 & UINT64_C( 0x8000000000000000 )) ) break; + --q; + } + qs[0] = q; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + q = (((rem.v64>>2) * recipSqrt32)>>32) + 2; + sigZExtra = (uint64_t) ((uint_fast64_t) q<<59); + term = softfloat_shortShiftLeft128( 0, qs[1], 53 ); + sigZ = + softfloat_add128( + (uint_fast64_t) qs[2]<<18, ((uint_fast64_t) qs[0]<<24) + (q>>5), + term.v64, term.v0 + ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (q & 0xF) <= 2 ) { + q &= ~3; + sigZExtra = (uint64_t) ((uint_fast64_t) q<<59); + y = softfloat_shortShiftLeft128( sigZ.v64, sigZ.v0, 6 ); + y.v0 |= sigZExtra>>58; + term = softfloat_sub128( y.v64, y.v0, 0, q ); + y = softfloat_mul64ByShifted32To128( term.v0, q ); + term = softfloat_mul64ByShifted32To128( term.v64, q ); + term = softfloat_add128( term.v64, term.v0, 0, y.v64 ); + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 20 ); + term = softfloat_sub128( term.v64, term.v0, rem.v64, rem.v0 ); + /*-------------------------------------------------------------------- + | The concatenation of `term' and `y.v0' is now the negative remainder + | (3 words altogether). + *--------------------------------------------------------------------*/ + if ( term.v64 & UINT64_C( 0x8000000000000000 ) ) { + sigZExtra |= 1; + } else { + if ( term.v64 | term.v0 | y.v0 ) { + if ( sigZExtra ) { + --sigZExtra; + } else { + sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, 0, 1 ); + sigZExtra = ~0; + } + } + } + } + return softfloat_roundPackToF128( 0, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_sub.c b/vendor/riscv-isa-sim/softfloat/f128_sub.c new file mode 100644 index 00000000..ce2e5adb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_sub.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. 
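/* Aside for review (illustration only, not part of the vendored sources):
 * every f128_* routine above begins with the same union type pun
 * (union ui128_f128) to expose the raw bits of a float128_t, then splits the
 * high 64-bit word into sign/exponent/fraction.  A minimal stand-alone
 * sketch of that split, matching what signF128UI64 / expF128UI64 /
 * fracF128UI64 compute (sign = bit 63, 15-bit biased exponent = bits 62..48,
 * top 48 fraction bits = bits 47..0): */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* High 64 bits of the IEEE binary128 value 1.0: biased exponent 0x3FFF,
     * zero fraction (the leading significand bit is implicit). */
    uint64_t hi64 = (uint64_t) 0x3FFF << 48;

    assert((hi64 >> 63) == 0);                       /* cf. signF128UI64 */
    assert(((hi64 >> 48) & 0x7FFF) == 0x3FFF);       /* cf. expF128UI64  */
    assert((hi64 & UINT64_C(0xFFFFFFFFFFFF)) == 0);  /* cf. fracF128UI64 */
    return 0;
}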
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t f128_sub( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2) + float128_t + (*magsFuncPtr)( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +#endif + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) + if ( signA == signB ) { + return softfloat_subMagsF128( uiA64, uiA0, uiB64, uiB0, signA ); + } else { + return softfloat_addMagsF128( uiA64, uiA0, uiB64, uiB0, signA ); + } +#else + magsFuncPtr = + (signA == signB) ? softfloat_subMagsF128 : softfloat_addMagsF128; + return (*magsFuncPtr)( uiA64, uiA0, uiB64, uiB0, signA ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_f16.c b/vendor/riscv-isa-sim/softfloat/f128_to_f16.c new file mode 100644 index 00000000..a910c12a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_f16.c @@ -0,0 +1,95 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f128_to_f16( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t frac64; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + frac64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FFF ) { + if ( frac64 ) { + softfloat_f128UIToCommonNaN( uiA64, uiA0, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = softfloat_shortShiftRightJam64( frac64, 34 ); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + exp -= 0x3FF1; + if ( sizeof (int_fast16_t) < sizeof (int_fast32_t) ) { + if ( exp < -0x40 ) exp = -0x40; + } + return softfloat_roundPackToF16( sign, exp, frac16 | 0x4000 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_f32.c b/vendor/riscv-isa-sim/softfloat/f128_to_f32.c new file mode 100644 index 00000000..d890d3eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_f32.c @@ -0,0 +1,95 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
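/* Aside for review (illustration only, not part of the vendored sources):
 * f128_to_f16 above folds the low significand word into a sticky bit
 * ("| (uiA0 != 0)") and then narrows with softfloat_shortShiftRightJam64.
 * A jamming right shift keeps, in its least significant bit, a record of
 * whether any ones were shifted out, which is what lets the later rounding
 * step detect inexactness.  Minimal sketch of the idea (the real helper has
 * different bounds and a fast path): */
#include <assert.h>
#include <stdint.h>

static uint64_t shift_right_jam64(uint64_t a, unsigned dist)
{
    if (dist >= 64) return a != 0;  /* everything shifted out: pure sticky */
    /* OR the discarded low bits, collapsed to 0/1, into the result. */
    return (a >> dist) | ((a & ((UINT64_C(1) << dist) - 1)) != 0);
}

int main(void)
{
    assert(shift_right_jam64(0x100, 4) == 0x10);  /* exact: no sticky bit   */
    assert(shift_right_jam64(0x101, 4) == 0x11);  /* lost a 1: sticky set   */
    assert(shift_right_jam64(1, 64) == 1);        /* fully shifted out      */
    return 0;
}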
+ +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f128_to_f32( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t frac64; + struct commonNaN commonNaN; + uint_fast32_t uiZ, frac32; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + frac64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FFF ) { + if ( frac64 ) { + softfloat_f128UIToCommonNaN( uiA64, uiA0, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac32 = softfloat_shortShiftRightJam64( frac64, 18 ); + if ( ! 
(exp | frac32) ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + exp -= 0x3F81; + if ( sizeof (int_fast16_t) < sizeof (int_fast32_t) ) { + if ( exp < -0x1000 ) exp = -0x1000; + } + return softfloat_roundPackToF32( sign, exp, frac32 | 0x40000000 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_f64.c b/vendor/riscv-isa-sim/softfloat/f128_to_f64.c new file mode 100644 index 00000000..e7aec201 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_f64.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
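/* Aside for review (illustration only, not part of the vendored sources):
 * packToF32UI, used by f128_to_f32 above for the infinity and zero cases, is
 * in upstream SoftFloat a macro that assembles a binary32 bit pattern as
 * (sign<<31) + (exp<<23) + frac.  A quick check against well-known
 * encodings: */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_f32(uint32_t sign, uint32_t exp, uint32_t frac)
{
    return (sign << 31) + (exp << 23) + frac;  /* cf. packToF32UI */
}

int main(void)
{
    assert(pack_f32(0, 0x7F, 0) == 0x3F800000u);  /* 1.0f               */
    assert(pack_f32(1, 0xFF, 0) == 0xFF800000u);  /* -infinity          */
    assert(pack_f32(0, 0x00, 1) == 0x00000001u);  /* smallest subnormal */
    return 0;
}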
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f128_to_f64( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t frac64, frac0; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct uint128 frac128; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + frac64 = fracF128UI64( uiA64 ); + frac0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FFF ) { + if ( frac64 | frac0 ) { + softfloat_f128UIToCommonNaN( uiA64, uiA0, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac128 = softfloat_shortShiftLeft128( frac64, frac0, 14 ); + frac64 = frac128.v64 | (frac128.v0 != 0); + if ( ! (exp | frac64) ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + exp -= 0x3C01; + if ( sizeof (int_fast16_t) < sizeof (int_fast32_t) ) { + if ( exp < -0x1000 ) exp = -0x1000; + } + return + softfloat_roundPackToF64( + sign, exp, frac64 | UINT64_C( 0x4000000000000000 ) ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_i32.c b/vendor/riscv-isa-sim/softfloat/f128_to_i32.c new file mode 100644 index 00000000..507691cc --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_i32.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f128_to_i32( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0x7FFF) && (sig64 | sig0) ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + sig64 |= (sig0 != 0); + shiftDist = 0x4023 - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c new file mode 100644 index 00000000..fc9f84f1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
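/* Aside for review (illustration only, not part of the vendored sources):
 * in f128_to_i32 above, shiftDist = 0x4023 - exp aligns the significand so
 * that softfloat_roundToI32 receives a fixed-point value with 12 fraction
 * bits: for 1.0 (exp 0x3FFF) the shift is 36, moving the hidden bit from
 * 2^48 down to 2^12.  A sketch of round-to-nearest-even on such a Q.12
 * value (the real helper also handles sign, saturation, the other rounding
 * modes, and the inexact flag): */
#include <assert.h>
#include <stdint.h>

static uint32_t round_near_even_q12(uint64_t sig)
{
    uint64_t z = (sig + 0x800) >> 12;               /* add half an ULP    */
    if ((sig & 0xFFF) == 0x800) z &= ~UINT64_C(1);  /* exact tie: to even */
    return (uint32_t) z;
}

int main(void)
{
    assert(round_near_even_q12(0x1000) == 1);  /* 1.0  -> 1               */
    assert(round_near_even_q12(0x1800) == 2);  /* 1.5  -> 2 (tie, even)   */
    assert(round_near_even_q12(0x2800) == 2);  /* 2.5  -> 2 (tie, even)   */
    assert(round_near_even_q12(0x2801) == 3);  /* just above 2.5 -> 3     */
    return 0;
}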
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f128_to_i32_r_minMag( float128_t a, bool exact )
+{
+    union ui128_f128 uA;
+    uint_fast64_t uiA64, uiA0;
+    int_fast32_t exp;
+    uint_fast64_t sig64;
+    int_fast32_t shiftDist;
+    bool sign;
+    int_fast32_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA64 = uA.ui.v64;
+    uiA0 = uA.ui.v0;
+    exp = expF128UI64( uiA64 );
+    sig64 = fracF128UI64( uiA64 ) | (uiA0 != 0);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x402F - exp;
+    if ( 49 <= shiftDist ) {
+        if ( exact && (exp | sig64) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF128UI64( uiA64 );
+    if ( shiftDist < 18 ) {
+        if (
+            sign && (shiftDist == 17)
+                && (sig64 < UINT64_C( 0x0000000000020000 ))
+        ) {
+            if ( exact && sig64 ) {
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+            }
+            return -0x7FFFFFFF - 1;
+        }
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x7FFF) && sig64 ? i32_fromNaN
+                : sign ?
i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + absZ = sig64>>shiftDist; + if ( + exact && ((uint_fast64_t) (uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f128_to_i64( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + struct uint128 sig128; + struct uint64_extra sigExtra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -15 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && (sig64 | sig0) ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + if ( shiftDist ) { + sig128 = softfloat_shortShiftLeft128( sig64, sig0, -shiftDist ); + sig64 = sig128.v64; + sig0 = sig128.v0; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + sigExtra = softfloat_shiftRightJam64Extra( sig64, sig0, shiftDist ); + sig64 = sigExtra.v; + sig0 = sigExtra.extra; + } + return softfloat_roundToI64( sign, sig64, sig0, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c new file mode 100644 index 00000000..7e0d63da --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f128_to_i64_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + int_fast8_t negShiftDist; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist < 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -14 ) { + if ( + (uiA64 == UINT64_C( 0xC03E000000000000 )) + && (sig0 < UINT64_C( 0x0002000000000000 )) + ) { + if ( exact && sig0 ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && (sig64 | sig0) ? i64_fromNaN + : sign ? 
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + negShiftDist = -shiftDist; + absZ = sig64<>(shiftDist & 63); + if ( exact && (uint64_t) (sig0<>shiftDist; + if ( exact && (sig0 || (absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + f128_to_ui32( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64; + int_fast32_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0x7FFF) && sig64 ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + shiftDist = 0x4023 - exp; + if ( 0 < shiftDist ) { + sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + } + return softfloat_roundToUI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c new file mode 100644 index 00000000..2097fb81 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f128_to_ui32_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + int_fast32_t exp; + uint_fast64_t sig64; + int_fast32_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( 49 <= shiftDist ) { + if ( exact && (exp | sig64) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF128UI64( uiA64 ); + if ( sign || (shiftDist < 17) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && sig64 ? ui32_fromNaN + : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + z = sig64>>shiftDist; + if ( exact && ((uint_fast64_t) z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + f128_to_ui64( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + struct uint128 sig128; + struct uint64_extra sigExtra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -15 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && (sig64 | sig0) ? ui64_fromNaN + : sign ? 
ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + if ( shiftDist ) { + sig128 = softfloat_shortShiftLeft128( sig64, sig0, -shiftDist ); + sig64 = sig128.v64; + sig0 = sig128.v0; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + sigExtra = softfloat_shiftRightJam64Extra( sig64, sig0, shiftDist ); + sig64 = sigExtra.v; + sig0 = sigExtra.extra; + } + return softfloat_roundToUI64( sign, sig64, sig0, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c new file mode 100644 index 00000000..fb16320a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c @@ -0,0 +1,105 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
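/* Aside for review (illustration only, not part of the vendored sources):
 * softfloat_shiftRightJam64Extra, used by f128_to_ui64 above, shifts a
 * two-word significand right while parking the bits that fall off in a
 * separate "extra" word, with anything previously in that word collapsed to
 * a sticky low bit, so the rounding step still sees them.  Simplified model
 * for shift distances 1..63 (the real helper also covers >= 64): */
#include <assert.h>
#include <stdint.h>

struct u64_extra { uint64_t v, extra; };

static struct u64_extra shift_right_jam64_extra(
    uint64_t v, uint64_t extra, unsigned dist /* 1..63 */ )
{
    struct u64_extra r;
    r.v = v >> dist;
    /* Shifted-out bits of v become the high bits of `extra`; the old
     * `extra` contents survive only as a sticky 0/1 in the low bit. */
    r.extra = (v << (64 - dist)) | (extra != 0);
    return r;
}

int main(void)
{
    struct u64_extra r = shift_right_jam64_extra(0x3, 0, 1);
    assert(r.v == 0x1);
    assert(r.extra == UINT64_C(0x8000000000000000));
    return 0;
}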
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f128_to_ui64_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + int_fast8_t negShiftDist; + uint_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist < 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( sign || (shiftDist < -15) ) goto invalid; + sig64 |= UINT64_C( 0x0001000000000000 ); + negShiftDist = -shiftDist; + z = sig64<>(shiftDist & 63); + if ( exact && (uint64_t) (sig0<>shiftDist; + if ( exact && (sig0 || (z< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t f16_add( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float16_t (*magsFuncPtr)( uint_fast16_t, uint_fast16_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF16UI( uiA ^ uiB ) ) { + return softfloat_subMagsF16( uiA, uiB ); + } else { + return softfloat_addMagsF16( uiA, uiB ); + } +#else + magsFuncPtr = + signF16UI( uiA ^ uiB ) ? 
softfloat_subMagsF16 : softfloat_addMagsF16; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_classify.c b/vendor/riscv-isa-sim/softfloat/f16_classify.c new file mode 100755 index 00000000..9402ff13 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f16_classify( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF16UI( uiA ) == 0x1F; + uint_fast16_t subnormalOrZero = expF16UI( uiA ) == 0; + bool sign = signF16UI( uiA ); + bool fracZero = fracF16UI( uiA ) == 0; + bool isNaN = isNaNF16UI( uiA ); + bool isSNaN = softfloat_isSigNaNF16UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_div.c b/vendor/riscv-isa-sim/softfloat/f16_div.c new file mode 100644 index 00000000..71b5c29b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_div.c @@ -0,0 +1,186 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
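/* Aside for review (illustration only, not part of the vendored sources):
 * f16_classify above (note: no SoftFloat license block, and file mode
 * 100755) appears to be an addition carried in this fork rather than
 * upstream SoftFloat, and its result uses the ten-bit class layout of the
 * RISC-V FCLASS instruction.  Decoding of the bit positions produced by the
 * shifts above: */
#include <stdint.h>
#include <stdio.h>

static const char *const fclass_name[10] = {
    "-infinity",     /* bit 0 */
    "-normal",       /* bit 1 */
    "-subnormal",    /* bit 2 */
    "-zero",         /* bit 3 */
    "+zero",         /* bit 4 */
    "+subnormal",    /* bit 5 */
    "+normal",       /* bit 6 */
    "+infinity",     /* bit 7 */
    "signaling NaN", /* bit 8 */
    "quiet NaN",     /* bit 9 */
};

int main(void)
{
    uint16_t mask = 1 << 6;  /* the class mask f16_classify yields for 1.0 */
    for (int i = 0; i < 10; ++i) {
        if (mask & (1u << i)) printf("bit %d: %s\n", i, fclass_name[i]);
    }
    return 0;
}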
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +extern const uint16_t softfloat_approxRecip_1k0s[]; +extern const uint16_t softfloat_approxRecip_1k1s[]; + +float16_t f16_div( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signB; + int_fast8_t expB; + uint_fast16_t sigB; + bool signZ; + struct exp8_sig16 normExpSig; + int_fast8_t expZ; +#ifdef SOFTFLOAT_FAST_DIV32TO16 + uint_fast32_t sig32A; + uint_fast16_t sigZ; +#else + int index; + uint16_t r0; + uint_fast16_t sigZ, rem; +#endif + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF16UI( uiB ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0xE; + sigA |= 0x0400; + sigB |= 0x0400; +#ifdef SOFTFLOAT_FAST_DIV32TO16 + if ( sigA < sigB ) { + --expZ; + sig32A = (uint_fast32_t) sigA<<15; + } else { + sig32A = (uint_fast32_t) sigA<<14; + } + sigZ = sig32A / sigB; + if ( ! (sigZ & 7) ) sigZ |= ((uint_fast32_t) sigB * sigZ != sig32A); +#else + if ( sigA < sigB ) { + --expZ; + sigA <<= 5; + } else { + sigA <<= 4; + } + index = sigB>>6 & 0xF; + r0 = softfloat_approxRecip_1k0s[index] + - (((uint_fast32_t) softfloat_approxRecip_1k1s[index] + * (sigB & 0x3F)) + >>10); + sigZ = ((uint_fast32_t) sigA * r0)>>16; + rem = (sigA<<10) - sigZ * sigB; + sigZ += (rem * (uint_fast32_t) r0)>>26; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + ++sigZ; + if ( ! 
(sigZ & 7) ) { + sigZ &= ~1; + rem = (sigA<<10) - sigZ * sigB; + if ( rem & 0x8000 ) { + sigZ -= 2; + } else { + if ( rem ) sigZ |= 1; + } + } +#endif + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF16UI( signZ, 0x1F, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF16UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_eq.c b/vendor/riscv-isa-sim/softfloat/f16_eq.c new file mode 100644 index 00000000..37a60998 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
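
Two quotient strategies coexist in f16_div above. Under SOFTFLOAT_FAST_DIV32TO16 the dividend is pre-shifted (15 or 14 bits, with the matching --expZ) so the truncated 32-by-16 quotient always lands in [2^14, 2^15); the only remaining concern is rounding, so when the three low quotient bits are zero the code ORs in a sticky bit recording whether anything was discarded. The fallback path reaches the same quotient from a table-driven reciprocal estimate instead of a hardware divide. A minimal sketch of the fast path's sticky logic (names and test values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Truncated quotient with a sticky bit folded into bit 0, as in the
       SOFTFLOAT_FAST_DIV32TO16 path: the later round-to-nearest-even step
       only needs to know that a nonzero remainder existed. */
    static uint32_t div_with_sticky( uint32_t sig32A, uint32_t sigB )
    {
        uint32_t sigZ = sig32A / sigB;
        if ( !(sigZ & 7) ) sigZ |= (sigB * sigZ != sig32A);
        return sigZ;
    }

    int main( void )
    {
        printf( "0x%X\n", div_with_sticky( (uint32_t) 0x400<<14, 0x400 ) ); /* 0x4000: exact      */
        printf( "0x%X\n", div_with_sticky( 0x401u*0x4000u + 1u,  0x401 ) ); /* 0x4001: sticky set */
        return 0;
    }
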
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_eq( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + if ( + softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! (uint16_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c new file mode 100644 index 00000000..894f7b59 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f16_eq_signaling( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! 
(uint16_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c new file mode 100644 index 00000000..657805be --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_isSignalingNaN( float16_t a ) +{ + union ui16_f16 uA; + + uA.f = a; + return softfloat_isSigNaNF16UI( uA.ui ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_le.c b/vendor/riscv-isa-sim/softfloat/f16_le.c new file mode 100644 index 00000000..37eaf187 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_le.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f16_le( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? signA || ! (uint16_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f16_le_quiet.c new file mode 100644 index 00000000..8391db74 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_le_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
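
The return expression of f16_le above packs two observations into one line: once NaNs are excluded, binary16 patterns with equal signs order like unsigned integers (with the direction reversed for negatives), and (uint16_t) ((uiA | uiB)<<1) is zero exactly when both operands are zeros of either sign, the one mixed-sign case that must compare equal. The comparisons that follow (le_quiet, lt, lt_quiet) reuse the same expression, varying only the NaN policy and strictness. As a self-contained sketch (assumes NaNs were already filtered out; the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* a <= b on raw binary16 patterns; mirrors the f16_le return expression. */
    static bool le_f16_bits( uint16_t uiA, uint16_t uiB )
    {
        bool signA = uiA >> 15;
        bool signB = uiB >> 15;
        if ( signA != signB ) {
            /* different signs: negative <= positive, except -0 vs +0 are equal */
            return signA || !(uint16_t) ((uiA | uiB)<<1);
        }
        /* equal signs: sign-magnitude order, flipped when both are negative */
        return (uiA == uiB) || (signA ^ (uiA < uiB));
    }
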
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_le_quiet( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + if ( + softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? signA || ! (uint16_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_lt.c b/vendor/riscv-isa-sim/softfloat/f16_lt.c new file mode 100644 index 00000000..3d3522a4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_lt.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f16_lt( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? signA && ((uint16_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c new file mode 100644 index 00000000..37f762cd --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_lt_quiet( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + if ( + softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? 
signA && ((uint16_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_mul.c b/vendor/riscv-isa-sim/softfloat/f16_mul.c new file mode 100644 index 00000000..255caa7e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_mul.c @@ -0,0 +1,140 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
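
Taken together, the comparison files implement both IEEE 754 NaN policies: f16_eq is the quiet equality (invalid only for a signaling NaN) and f16_eq_signaling raises invalid for any NaN, while f16_le and f16_lt are the signaling orderings and the _quiet variants above suppress that. A usage sketch of the difference, assuming the vendored library is built and linked (the raw patterns are illustrative):

    #include <stdio.h>
    #include "softfloat.h"

    int main( void )
    {
        float16_t one  = { 0x3C00 };   /* +1.0      */
        float16_t qnan = { 0x7E00 };   /* quiet NaN */

        softfloat_exceptionFlags = 0;
        (void) f16_lt( one, qnan );                      /* unordered: false */
        printf( "lt:       invalid=%d\n",
                !!(softfloat_exceptionFlags & softfloat_flag_invalid) ); /* 1 */

        softfloat_exceptionFlags = 0;
        (void) f16_lt_quiet( one, qnan );
        printf( "lt_quiet: invalid=%d\n",
                !!(softfloat_exceptionFlags & softfloat_flag_invalid) ); /* 0 */
        return 0;
    }
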
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f16_mul( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signB; + int_fast8_t expB; + uint_fast16_t sigB; + bool signZ; + uint_fast16_t magBits; + struct exp8_sig16 normExpSig; + int_fast8_t expZ; + uint_fast32_t sig32Z; + uint_fast16_t sigZ, uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF16UI( uiB ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA || ((expB == 0x1F) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0xF; + sigA = (sigA | 0x0400)<<4; + sigB = (sigB | 0x0400)<<5; + sig32Z = (uint_fast32_t) sigA * sigB; + sigZ = sig32Z>>16; + if ( sig32Z & 0xFFFF ) sigZ |= 1; + if ( sigZ < 0x4000 ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + } else { + uiZ = packToF16UI( signZ, 0x1F, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF16UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f16_mulAdd.c new file mode 100644 index 00000000..40261963 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
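
The significand arithmetic in f16_mul above is laid out so everything fits one 32-bit word: the hidden bits are ORed in and the operands pre-shifted by 4 and 5 bits, fixing the product's binary point; the low 16 product bits collapse into a single sticky bit; and at most one left shift renormalizes, since the product of two significands in [1,2) lies in [1,4). Just that significand path, as a sketch (the helper name is illustrative):

    #include <stdint.h>

    /* sigA, sigB: 11-bit significands with the hidden bit 0x0400 already set.
       Returns the significand in [0x4000, 0x8000) expected by the rounding
       step, sticky in bit 0; *expAdjust is 0 or -1. */
    static uint32_t mul_sigs_with_sticky( uint32_t sigA, uint32_t sigB,
                                          int *expAdjust )
    {
        uint32_t sig32Z = (sigA<<4) * (sigB<<5);
        uint32_t sigZ = sig32Z>>16;
        if ( sig32Z & 0xFFFF ) sigZ |= 1;   /* sticky */
        *expAdjust = 0;
        if ( sigZ < 0x4000 ) {              /* product was in [1,2): renormalize */
            *expAdjust = -1;
            sigZ <<= 1;
        }
        return sigZ;
    }
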
+ +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t f16_mulAdd( float16_t a, float16_t b, float16_t c ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + union ui16_f16 uC; + uint_fast16_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF16( uiA, uiB, uiC, 0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_rem.c b/vendor/riscv-isa-sim/softfloat/f16_rem.c new file mode 100644 index 00000000..86c319dd --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_rem.c @@ -0,0 +1,171 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
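
f16_mulAdd above is deliberately thin: the multiply, the wide add, and the single final rounding that makes the operation fused all live in softfloat_mulAddF16, with the trailing 0 selecting the plain a*b + c flavour (nonzero op values, used inside SoftFloat for the negated variants, are a library internal, not part of this file). The single rounding is observable. A usage sketch, assuming the library is built and the default round-to-nearest-even mode (raw patterns are illustrative):

    #include <stdio.h>
    #include "softfloat.h"

    int main( void )
    {
        float16_t a = { 0x3C01 };   /* 1 + 2^-10 */
        float16_t b = { 0x3BFE };   /* 1 - 2^-10 */
        float16_t c = { 0xBC00 };   /* -1.0      */

        /* a*b = 1 - 2^-20 exactly; f16_mul rounds that to 1.0, so the
           separate add returns +0, while the fused path keeps the residual. */
        float16_t fused   = f16_mulAdd( a, b, c );
        float16_t unfused = f16_add( f16_mul( a, b ), c );

        printf( "fused=0x%04X unfused=0x%04X\n",
                (unsigned) fused.v, (unsigned) unfused.v );
        /* expected: fused=0x8010 (-2^-20, subnormal), unfused=0x0000 (+0) */
        return 0;
    }
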
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f16_rem( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + union ui16_f16 uB; + uint_fast16_t uiB; + int_fast8_t expB; + uint_fast16_t sigB; + struct exp8_sig16 normExpSig; + uint16_t rem; + int_fast8_t expDiff; + uint_fast16_t q; + uint32_t recip32, q32; + uint16_t altRem, meanRem; + bool signRem; + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA || ((expB == 0x1F) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | 0x0400; + sigB |= 0x0400; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 3; + if ( expDiff ) { + rem <<= 2; + q = 0; + } else { + rem <<= 3; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( (uint_fast32_t) sigB<<21 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 4; + expDiff -= 31; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | which is believed to be the maximum possible. 
+ *--------------------------------------------------------------------*/ + sigB <<= 3; + for (;;) { + q32 = (rem * (uint_fast64_t) recip32)>>16; + if ( expDiff < 0 ) break; + rem = -((uint_fast16_t) q32 * sigB); + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -30 here.) + *--------------------------------------------------------------------*/ + q32 >>= ~expDiff & 31; + q = q32; + rem = (rem<<(expDiff + 30)) - q * sigB; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & 0x8000) ); + meanRem = rem + altRem; + if ( (meanRem & 0x8000) || (! meanRem && (q & 1)) ) rem = altRem; + signRem = signA; + if ( 0x8000 <= rem ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF16( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f16_roundToInt.c new file mode 100644 index 00000000..9bbd47eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_roundToInt.c @@ -0,0 +1,112 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
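
f16_rem above computes the IEEE remainder, r = a - n*b with n the integer nearest a/b and ties broken to an even n, which is not the truncating fmod. The reciprocal estimate retires about 29 quotient bits per loop pass, as the comment notes; the final do/while then advances the quotient one step at a time until the remainder wraps negative, and meanRem, the sum of the last non-negative remainder and the first negative one, decides which of the two corresponds to the nearest n. That selection step in isolation (mirrors the code above; names are illustrative):

    #include <stdint.h>

    static uint16_t pick_nearest_rem( uint16_t rem, uint16_t sigB, uint16_t q )
    {
        uint16_t altRem, meanRem;
        do {
            altRem = rem;             /* remainder for quotient q         */
            ++q;
            rem -= sigB;              /* remainder for quotient q, one up */
        } while ( !(rem & 0x8000) );  /* stop once it wraps negative      */
        meanRem = rem + altRem;       /* == 2*altRem - sigB               */
        if ( (meanRem & 0x8000) || (!meanRem && (q & 1)) ) {
            rem = altRem;             /* altRem nearer, or the even-n tie */
        }
        return rem;                   /* caller fixes sign and normalizes */
    }
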
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f16_roundToInt( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + int_fast8_t exp; + uint_fast16_t uiZ, lastBitMask, roundBitsMask; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0xE ) { + if ( ! (uint16_t) (uiA<<1) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF16UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF16UI( uiA ) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0xE ) uiZ |= packToF16UI( 0, 0xF, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF16UI( 1, 0xF, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF16UI( 0, 0xF, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x19 <= exp ) { + if ( (exp == 0x1F) && fracF16UI( uiA ) ) { + uiZ = softfloat_propagateNaNF16UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast16_t) 1<<(0x19 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF16UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_sqrt.c b/vendor/riscv-isa-sim/softfloat/f16_sqrt.c new file mode 100644 index 00000000..7ff29239 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_sqrt.c @@ -0,0 +1,136 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[]; + +float16_t f16_sqrt( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA, uiZ; + struct exp8_sig16 normExpSig; + int_fast8_t expZ; + int index; + uint_fast16_t r0; + uint_fast32_t ESqrR0; + uint16_t sigma0; + uint_fast16_t recipSqrt16, sigZ, shiftedSigZ; + uint16_t negRem; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF16UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = ((expA - 0xF)>>1) + 0xE; + expA &= 1; + sigA |= 0x0400; + index = (sigA>>6 & 0xE) + expA; + r0 = softfloat_approxRecipSqrt_1k0s[index] + - (((uint_fast32_t) softfloat_approxRecipSqrt_1k1s[index] + * (sigA & 0x7F)) + >>11); + ESqrR0 = ((uint_fast32_t) r0 * r0)>>1; + if ( expA ) ESqrR0 >>= 1; + sigma0 = ~(uint_fast16_t) ((ESqrR0 * sigA)>>16); + recipSqrt16 = r0 + (((uint_fast32_t) r0 * sigma0)>>25); + if ( ! 
(recipSqrt16 & 0x8000) ) recipSqrt16 = 0x8000; + sigZ = ((uint_fast32_t) (sigA<<5) * recipSqrt16)>>16; + if ( expA ) sigZ >>= 1; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + ++sigZ; + if ( ! (sigZ & 7) ) { + shiftedSigZ = sigZ>>1; + negRem = shiftedSigZ * shiftedSigZ; + sigZ &= ~1; + if ( negRem & 0x8000 ) { + sigZ |= 1; + } else { + if ( negRem ) --sigZ; + } + } + return softfloat_roundPackToF16( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_sub.c b/vendor/riscv-isa-sim/softfloat/f16_sub.c new file mode 100644 index 00000000..811f239f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_sub.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t f16_sub( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float16_t (*magsFuncPtr)( uint_fast16_t, uint_fast16_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF16UI( uiA ^ uiB ) ) { + return softfloat_addMagsF16( uiA, uiB ); + } else { + return softfloat_subMagsF16( uiA, uiB ); + } +#else + magsFuncPtr = + signF16UI( uiA ^ uiB ) ? 
softfloat_addMagsF16 : softfloat_subMagsF16; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_f128.c b/vendor/riscv-isa-sim/softfloat/f16_to_f128.c new file mode 100644 index 00000000..961cdaaf --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_f128.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f16_to_f128( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + struct commonNaN commonNaN; + struct uint128 uiZ; + struct exp8_sig16 normExpSig; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + if ( frac ) { + softfloat_f16UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF128UI( &commonNaN ); + } else { + uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ.v0 = 0; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! 
frac ) { + uiZ.v64 = packToF128UI64( sign, 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + normExpSig = softfloat_normSubnormalF16Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ.v64 = packToF128UI64( sign, exp + 0x3FF0, (uint_fast64_t) frac<<38 ); + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_f32.c b/vendor/riscv-isa-sim/softfloat/f16_to_f32.c new file mode 100644 index 00000000..fb8b3819 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_f32.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
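
f16_to_f128 above shows the widening recipe, which is exact for every binary16 value: add the difference of the format biases to the exponent (16383 - 15 = 0x3FF0) and left-align the ten fraction bits into the wider significand (the high 64-bit word holds 48 significand bits, hence the 38-bit shift); infinities, NaNs, and zeros are repacked directly, and subnormal inputs are normalized first. f16_to_f32 and f16_to_f64 below follow the identical pattern with +0x70/<<13 and +0x3F0/<<42. The binary32 normal-number case as a self-contained sketch (the helper name is illustrative):

    #include <stdint.h>

    /* Exact widening of a normal binary16 (0 < exp < 0x1F) to binary32:
       re-bias by 127 - 15 = 0x70, move the fraction up 23 - 10 = 13 bits. */
    static uint32_t f16_bits_to_f32_bits_normal( uint16_t ui )
    {
        uint32_t sign = (uint32_t) (ui>>15) << 31;
        uint32_t exp  = (ui>>10) & 0x1F;
        uint32_t frac = ui & 0x3FF;
        return sign | ((exp + 0x70) << 23) | (frac << 13);
    }
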
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f16_to_f32( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + struct commonNaN commonNaN; + uint_fast32_t uiZ; + struct exp8_sig16 normExpSig; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + if ( frac ) { + softfloat_f16UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF16Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF32UI( sign, exp + 0x70, (uint_fast32_t) frac<<13 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_f64.c b/vendor/riscv-isa-sim/softfloat/f16_to_f64.c new file mode 100644 index 00000000..4ab27ba0 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_f64.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f16_to_f64( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct exp8_sig16 normExpSig; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + if ( frac ) { + softfloat_f16UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF16Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF64UI( sign, exp + 0x3F0, (uint_fast64_t) frac<<42 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i16.c b/vendor/riscv-isa-sim/softfloat/f16_to_i16.c new file mode 100644 index 00000000..b0fbb7cc --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_i16.c @@ -0,0 +1,57 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
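
All three widening conversions share the subnormal path seen again in f16_to_f64 above: softfloat_normSubnormalF16Sig shifts the nonzero fraction up until its leading 1 sits in the hidden-bit position (bit 10) and returns a matching exponent, and the caller then uses exp - 1 because SoftFloat's pack macros add rather than OR the significand, so the now-explicit leading bit deliberately carries into the low bit of the exponent field. An equivalent of the helper (upstream counts leading zeros; the loop here is an illustrative stand-in):

    #include <stdint.h>

    struct exp_sig16 { int exp; uint16_t sig; };

    /* Normalize a binary16 subnormal fraction: afterwards bit 10 of sig is
       set and sig * 2^(exp - 25) still equals the original frac * 2^-24. */
    static struct exp_sig16 norm_subnormal_f16( uint16_t sig )   /* sig != 0 */
    {
        int shiftDist = 0;
        while ( !(sig & 0x0400) ) { sig <<= 1; ++shiftDist; }
        struct exp_sig16 z = { 1 - shiftDist, sig };
        return z;
    }
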
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast16_t f16_to_i16( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    uint_fast8_t old_flags = softfloat_exceptionFlags;
+
+    int_fast32_t sig32 = f16_to_i32(a, roundingMode, exact);
+
+    if (sig32 > INT16_MAX) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return i16_fromPosOverflow;
+    } else if (sig32 < INT16_MIN) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return i16_fromNegOverflow;
+    } else {
+        return sig32;
+    }
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i32.c b/vendor/riscv-isa-sim/softfloat/f16_to_i32.c
new file mode 100644
index 00000000..24b19846
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i32.c
@@ -0,0 +1,87 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
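f16_to_i16 above (and the other narrow integer conversions this patch adds) converts through f16_to_i32 and then saturates: the exception flags are snapshotted, the 32-bit conversion does the rounding, and an out-of-range result is replaced by i16_fromPosOverflow or i16_fromNegOverflow with only the invalid flag added on top of the saved flags. A sketch of the observable behavior, illustrative only, assuming the riscv specialize.h overflow constants clamp to INT16_MAX/INT16_MIN:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float16_t big = { .v = 0x7800 };   /* 2^15 = 32768.0, one past INT16_MAX */
    softfloat_exceptionFlags = 0;
    int_fast16_t r = f16_to_i16( big, softfloat_round_near_even, true );
    printf( "r=%d invalid=%d\n", (int) r,
            (softfloat_exceptionFlags & softfloat_flag_invalid) != 0 );
    return 0;
}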
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f16_to_i32( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    bool sign;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast32_t sig32;
+    int_fast8_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF16UI( uiA );
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            frac ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = frac;
+    if ( exp ) {
+        sig32 |= 0x0400;
+        shiftDist = exp - 0x19;
+        if ( 0 <= shiftDist ) {
+            sig32 <<= shiftDist;
+            return sign ? -sig32 : sig32;
+        }
+        shiftDist = exp - 0x0D;
+        if ( 0 < shiftDist ) sig32 <<= shiftDist;
+    }
+    return
+        softfloat_roundToI32(
+            sign, (uint_fast32_t) sig32, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c
new file mode 100644
index 00000000..ebb4965c
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
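In f16_to_i32 above, the fast path handles exponents of 0x19 (25) and up, where the value sig * 2^(exp - 25) is already an integer, by shifting the 11-bit significand left and negating for the sign. Smaller values are instead left-shifted by exp - 0x0D, which lines the binary point up at bit 12, the fixed-point position softfloat_roundToI32 rounds at (for example, 1.0 has exp 0x0F, so 0x0400<<2 = 4096 = 1 * 2^12). A worked check, illustrative only and not part of the vendored sources:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float16_t x = { .v = 0x3E00 };   /* 1.5: exp 0x0F, frac 0x200 */
    /* 0x600<<2 = 0x1800 = 1.5 * 2^12; round-to-nearest-even gives 2 */
    printf( "%d\n", (int) f16_to_i32( x, softfloat_round_near_even, true ) );
    return 0;
}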
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f16_to_i32_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    int_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (int_fast32_t) (frac | 0x0400)<<shiftDist;
+    if ( exact && (alignedSig & 0x3FF) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    alignedSig >>= 10;
+    return sign ? -alignedSig : alignedSig;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i64.c b/vendor/riscv-isa-sim/softfloat/f16_to_i64.c
new file mode 100644
index 00000000..c2417456
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i64.c
@@ -0,0 +1,87 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
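The _r_minMag variants implement round-to-minimum-magnitude, that is, truncation toward zero, without taking a rounding-mode argument: any value below 1 in magnitude returns 0 (raising only inexact when requested), NaNs and out-of-range exponents raise invalid, and otherwise the aligned significand simply drops its 10 fraction bits, with the dropped bits deciding the inexact flag; the unsigned variants additionally treat any negative input as invalid. The check below is illustrative only, and note that the inexact test on the aligned significand is part of the span reconstructed above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float16_t x = { .v = 0x3E00 };                    /* 1.5 */
    softfloat_exceptionFlags = 0;
    int_fast32_t r = f16_to_i32_r_minMag( x, true );  /* truncates to 1 */
    printf( "r=%d inexact=%d\n", (int) r,
            (softfloat_exceptionFlags & softfloat_flag_inexact) != 0 );
    return 0;
}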
+IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f16_to_i64( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    bool sign;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast32_t sig32;
+    int_fast8_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF16UI( uiA );
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            frac ? i64_fromNaN
+                : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = frac;
+    if ( exp ) {
+        sig32 |= 0x0400;
+        shiftDist = exp - 0x19;
+        if ( 0 <= shiftDist ) {
+            sig32 <<= shiftDist;
+            return sign ? -sig32 : sig32;
+        }
+        shiftDist = exp - 0x0D;
+        if ( 0 < shiftDist ) sig32 <<= shiftDist;
+    }
+    return
+        softfloat_roundToI32(
+            sign, (uint_fast32_t) sig32, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c
new file mode 100644
index 00000000..dc9a8d37
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
+IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f16_to_i64_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    int_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? i64_fromNaN
+                : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (int_fast32_t) (frac | 0x0400)<<shiftDist;
+    if ( exact && (alignedSig & 0x3FF) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    alignedSig >>= 10;
+    return sign ? -alignedSig : alignedSig;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i8.c b/vendor/riscv-isa-sim/softfloat/f16_to_i8.c
new file mode 100644
index 00000000..23638cc1
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i8.c
@@ -0,0 +1,57 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast8_t f16_to_i8( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    uint_fast8_t old_flags = softfloat_exceptionFlags;
+
+    int_fast32_t sig32 = f16_to_i32(a, roundingMode, exact);
+
+    if (sig32 > INT8_MAX) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return i8_fromPosOverflow;
+    } else if (sig32 < INT8_MIN) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return i8_fromNegOverflow;
+    } else {
+        return sig32;
+    }
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui16.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui16.c
new file mode 100644
index 00000000..81c4f8d9
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui16.c
@@ -0,0 +1,54 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
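f16_to_i8 above narrows the same way as f16_to_i16. The unsigned helpers that follow (f16_to_ui16 here, f16_to_ui8 later in the patch) clamp only against the upper bound: negative inputs are already turned into invalid plus ui32_fromNegOverflow inside f16_to_ui32, so a single "sig32 > UINT16_MAX" test suffices. A sketch of the negative-input path, illustrative only; the exact saturation value comes from the riscv specialize.h constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float16_t neg = { .v = 0xBC00 };   /* -1.0 */
    softfloat_exceptionFlags = 0;
    uint_fast16_t r = f16_to_ui16( neg, softfloat_round_near_even, true );
    printf( "r=%u invalid=%d\n", (unsigned) r,
            (softfloat_exceptionFlags & softfloat_flag_invalid) != 0 );
    return 0;
}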
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast16_t f16_to_ui16( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    uint_fast8_t old_flags = softfloat_exceptionFlags;
+
+    uint_fast32_t sig32 = f16_to_ui32(a, roundingMode, exact);
+
+    if (sig32 > UINT16_MAX) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return ui16_fromPosOverflow;
+    } else {
+        return sig32;
+    }
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui32.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui32.c
new file mode 100644
index 00000000..c99af39c
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui32.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f16_to_ui32( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    bool sign;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    uint_fast32_t sig32;
+    int_fast8_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF16UI( uiA );
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            frac ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = frac;
+    if ( exp ) {
+        sig32 |= 0x0400;
+        shiftDist = exp - 0x19;
+        if ( (0 <= shiftDist) && ! sign ) {
+            return sig32<<shiftDist;
+        }
+        shiftDist = exp - 0x0D;
+        if ( 0 < shiftDist ) sig32 <<= shiftDist;
+    }
+    return
+        softfloat_roundToUI32(
+            sign, sig32, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f16_to_ui32_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    uint_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( sign || (exp == 0x1F) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (uint_fast32_t) (frac | 0x0400)<<shiftDist;
+    if ( exact && (alignedSig & 0x3FF) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return alignedSig>>10;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui64.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui64.c
new file mode 100644
index 00000000..dd260eae
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui64.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
+IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f16_to_ui64( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    bool sign;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    uint_fast32_t sig32;
+    int_fast8_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF16UI( uiA );
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            frac ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = frac;
+    if ( exp ) {
+        sig32 |= 0x0400;
+        shiftDist = exp - 0x19;
+        if ( (0 <= shiftDist) && ! sign ) {
+            return sig32<<shiftDist;
+        }
+        shiftDist = exp - 0x0D;
+        if ( 0 < shiftDist ) sig32 <<= shiftDist;
+    }
+    return
+        softfloat_roundToUI32(
+            sign, sig32, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f16_to_ui64_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    uint_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( sign || (exp == 0x1F) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (uint_fast32_t) (frac | 0x0400)<<shiftDist;
+    if ( exact && (alignedSig & 0x3FF) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return alignedSig>>10;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui8.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui8.c
new file mode 100644
index 00000000..96124e12
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui8.c
@@ -0,0 +1,54 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast8_t f16_to_ui8( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    uint_fast8_t old_flags = softfloat_exceptionFlags;
+
+    uint_fast32_t sig32 = f16_to_ui32(a, roundingMode, exact);
+
+    if (sig32 > UINT8_MAX) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return ui8_fromPosOverflow;
+    } else {
+        return sig32;
+    }
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_add.c b/vendor/riscv-isa-sim/softfloat/f32_add.c
new file mode 100644
index 00000000..4a51eccf
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_add.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_add( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 1)
+    float32_t (*magsFuncPtr)( uint_fast32_t, uint_fast32_t );
+#endif
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL)
+    if ( signF32UI( uiA ^ uiB ) ) {
+        return softfloat_subMagsF32( uiA, uiB );
+    } else {
+        return softfloat_addMagsF32( uiA, uiB );
+    }
+#else
+    magsFuncPtr =
+        signF32UI( uiA ^ uiB ) ? softfloat_subMagsF32 : softfloat_addMagsF32;
+    return (*magsFuncPtr)( uiA, uiB );
+#endif
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_classify.c b/vendor/riscv-isa-sim/softfloat/f32_classify.c
new file mode 100755
index 00000000..83fad878
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_classify.c
@@ -0,0 +1,36 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast16_t f32_classify( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+
+    uA.f = a;
+    uiA = uA.ui;
+
+    uint_fast16_t infOrNaN = expF32UI( uiA ) == 0xFF;
+    uint_fast16_t subnormalOrZero = expF32UI( uiA ) == 0;
+    bool sign = signF32UI( uiA );
+    bool fracZero = fracF32UI( uiA ) == 0;
+    bool isNaN = isNaNF32UI( uiA );
+    bool isSNaN = softfloat_isSigNaNF32UI( uiA );
+
+    return
+        (  sign && infOrNaN && fracZero )          << 0 |
+        (  sign && !infOrNaN && !subnormalOrZero ) << 1 |
+        (  sign && subnormalOrZero && !fracZero )  << 2 |
+        (  sign && subnormalOrZero && fracZero )   << 3 |
+        ( !sign && infOrNaN && fracZero )          << 7 |
+        ( !sign && !infOrNaN && !subnormalOrZero ) << 6 |
+        ( !sign && subnormalOrZero && !fracZero )  << 5 |
+        ( !sign && subnormalOrZero && fracZero )   << 4 |
+        ( isNaN &&  isSNaN )                       << 8 |
+        ( isNaN && !isSNaN )                       << 9;
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_div.c b/vendor/riscv-isa-sim/softfloat/f32_div.c
new file mode 100644
index 00000000..9d101254
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_div.c
@@ -0,0 +1,180 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
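f32_classify above is a riscv-isa-sim addition that packs the ten mutually exclusive classes into the bit layout of the RISC-V FCLASS instruction: bit 0 is -inf, 1 negative normal, 2 negative subnormal, 3 -0, 4 +0, 5 positive subnormal, 6 positive normal, 7 +inf, 8 signaling NaN, 9 quiet NaN; exactly one bit is set for any input. An illustrative check, not part of the vendored sources:

#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float32_t negzero = { .v = 0x80000000 };   /* -0.0f */
    float32_t qnan    = { .v = 0x7FC00000 };   /* default quiet NaN */
    printf( "%03X %03X\n",
            (unsigned) f32_classify( negzero ),   /* 0x008: bit 3 set */
            (unsigned) f32_classify( qnan ) );    /* 0x200: bit 9 set */
    return 0;
}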
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_div( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+    uint_fast64_t sig64A;
+    uint_fast32_t sigZ;
+#else
+    uint_fast32_t sigZ;
+    uint_fast64_t rem;
+#endif
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0xFF ) {
+            if ( sigB ) goto propagateNaN;
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            if ( ! (expA | sigA) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinite );
+            goto infinity;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA - expB + 0x7E;
+    sigA |= 0x00800000;
+    sigB |= 0x00800000;
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+    if ( sigA < sigB ) {
+        --expZ;
+        sig64A = (uint_fast64_t) sigA<<31;
+    } else {
+        sig64A = (uint_fast64_t) sigA<<30;
+    }
+    sigZ = sig64A / sigB;
+    if ( ! (sigZ & 0x3F) ) sigZ |= ((uint_fast64_t) sigB * sigZ != sig64A);
+#else
+    if ( sigA < sigB ) {
+        --expZ;
+        sigA <<= 8;
+    } else {
+        sigA <<= 7;
+    }
+    sigB <<= 8;
+    sigZ = ((uint_fast64_t) sigA * softfloat_approxRecip32_1( sigB ))>>32;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sigZ += 2;
+    if ( (sigZ & 0x3F) < 2 ) {
+        sigZ &= ~3;
+#ifdef SOFTFLOAT_FAST_INT64
+        rem = ((uint_fast64_t) sigA<<31) - (uint_fast64_t) sigZ * sigB;
+#else
+        rem = ((uint_fast64_t) sigA<<32) - (uint_fast64_t) (sigZ<<1) * sigB;
+#endif
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            sigZ -= 4;
+        } else {
+            if ( rem ) sigZ |= 1;
+        }
+    }
+#endif
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infinity:
+    uiZ = packToF32UI( signZ, 0xFF, 0 );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_eq.c b/vendor/riscv-isa-sim/softfloat/f32_eq.c
new file mode 100644
index 00000000..5f07eee3
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_eq.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
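Two details of f32_div above are worth spelling out. The provisional quotient exponent is expA - expB + 0x7E, one less than the f32 bias of 0x7F, because the dividend significand is pre-shifted so the quotient lands with extra bits below the 23-bit fraction for softfloat_roundPackToF32 to round on. And the SOFTFLOAT_FAST_DIV64TO32 path makes the truncated hardware quotient safe to round by or-ing a sticky bit back in whenever sigB * sigZ fails to reproduce the dividend exactly. An illustrative check, assuming round-to-nearest-even:

#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float32_t one   = { .v = 0x3F800000 };   /* 1.0f */
    float32_t three = { .v = 0x40400000 };   /* 3.0f */
    softfloat_roundingMode = softfloat_round_near_even;
    softfloat_exceptionFlags = 0;
    float32_t z = f32_div( one, three );
    /* expect 0x3EAAAAAB (nearest f32 to 1/3) with the inexact flag set */
    printf( "%08X inexact=%d\n", (unsigned) z.v,
            (softfloat_exceptionFlags & softfloat_flag_inexact) != 0 );
    return 0;
}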
+IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_eq( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1);
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c
new file mode 100644
index 00000000..f5fcc824
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
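f32_eq above returns false for any NaN operand (raising invalid only for signaling NaNs) and otherwise treats +0 and -0 as equal: `(uiA | uiB)<<1` discards both sign bits and is zero exactly when both encodings are zeros of either sign, which is what the final line tests alongside plain bit equality. Illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float32_t pz = { .v = 0x00000000 };   /* +0.0f */
    float32_t nz = { .v = 0x80000000 };   /* -0.0f */
    printf( "%d\n", (int) f32_eq( pz, nz ) );   /* 1: zeros compare equal */
    return 0;
}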
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_eq_signaling( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c new file mode 100644 index 00000000..5004a5aa --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_isSignalingNaN( float32_t a ) +{ + union ui32_f32 uA; + + uA.f = a; + return softfloat_isSigNaNF32UI( uA.ui ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_le.c b/vendor/riscv-isa-sim/softfloat/f32_le.c new file mode 100644 index 00000000..77595fbb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_le.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_le( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f32_le_quiet.c
new file mode 100644
index 00000000..1ec91010
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_le_quiet.c
@@ -0,0 +1,71 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
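f32_le above (and f32_lt further on) compares sign-magnitude encodings directly: when the signs differ, any negative value is <= any positive one except that +0 and -0 must still compare equal, which the `(uiA | uiB)<<1` term handles; when the signs agree, unsigned comparison of the payloads gives the order, flipped for negatives by the `signA ^` term. Illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float32_t nz = { .v = 0x80000000 };   /* -0.0f */
    float32_t pz = { .v = 0x00000000 };   /* +0.0f */
    float32_t m1 = { .v = 0xBF800000 };   /* -1.0f */
    printf( "%d %d\n",
            (int) f32_le( nz, pz ),    /* 1: -0 <= +0 */
            (int) f32_le( pz, m1 ) );  /* 0: +0 <= -1 is false */
    return 0;
}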
+IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_le_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_lt.c b/vendor/riscv-isa-sim/softfloat/f32_lt.c
new file mode 100644
index 00000000..9e12843f
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_lt.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_lt( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c new file mode 100644 index 00000000..9f83b810 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_lt_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? 
signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_mul.c b/vendor/riscv-isa-sim/softfloat/f32_mul.c new file mode 100644 index 00000000..a2a673f1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_mul.c @@ -0,0 +1,137 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
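All four comparison predicates in this group share one bit-level idea: IEEE-754 bit patterns order like sign-magnitude integers, with +0/-0 as the single special case, detected by `(uiA | uiB)<<1 == 0`. The following standalone sketch re-implements the ordering logic of f32_lt above on host floats (NaN handling omitted); it is an illustration, not the vendored code:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool bits_lt(float a, float b)
    {
        uint32_t ua, ub;
        memcpy(&ua, &a, sizeof ua);
        memcpy(&ub, &b, sizeof ub);
        bool sa = ua >> 31, sb = ub >> 31;
        if (sa != sb)                   /* opposite signs: a < b iff a is negative */
            return sa && (uint32_t)((ua | ub) << 1) != 0;  /* ...and not both zero */
        return (ua != ub) && (sa ^ (ua < ub));  /* same sign: bit order, flipped
                                                   on the negative axis */
    }

    int main(void)
    {
        assert( bits_lt(-1.0f,  2.0f));
        assert(!bits_lt( 0.0f, -0.0f));  /* +0 and -0 compare equal */
        assert( bits_lt(-2.0f, -1.0f));  /* larger magnitude is smaller when negative */
        assert(!bits_lt( 3.0f,  3.0f));
        return 0;
    }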
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_mul( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + uint_fast32_t magBits; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x7F; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<8; + sigZ = softfloat_shortShiftRightJam64( (uint_fast64_t) sigA * sigB, 32 ); + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + } else { + uiZ = packToF32UI( signZ, 0xFF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f32_mulAdd.c new file mode 100644 index 00000000..e98021b7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
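In f32_mul above, `expZ = expA + expB - 0x7F` subtracts one copy of the bias 127 because adding two biased exponents double-counts it. The product of two significands in [1,2) lies in [1,4); when it reaches 2 the result exponent gains one, which the source folds into the `sigZ < 0x40000000` renormalization and the carry out of packToF32UI. A quick check of the exponent algebra against hardware multiplication, normals only:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static int biased_exp(float x)
    {
        uint32_t u;
        memcpy(&u, &x, sizeof u);
        return (u >> 23) & 0xFF;
    }

    int main(void)
    {
        /* 6.0 = 1.5*2^2, 10.0 = 1.25*2^3; 1.5*1.25 = 1.875 < 2, so no carry. */
        assert(biased_exp(6.0f * 10.0f) == biased_exp(6.0f) + biased_exp(10.0f) - 0x7F);

        /* 3.0 = 1.5*2^1; 1.5*1.5 = 2.25 >= 2, so the exponent gains one. */
        assert(biased_exp(3.0f * 3.0f) == biased_exp(3.0f) + biased_exp(3.0f) - 0x7F + 1);
        return 0;
    }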
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ union ui32_f32 uB;
+ uint_fast32_t uiB;
+ union ui32_f32 uC;
+ uint_fast32_t uiC;
+
+ uA.f = a;
+ uiA = uA.ui;
+ uB.f = b;
+ uiB = uB.ui;
+ uC.f = c;
+ uiC = uC.ui;
+ return softfloat_mulAddF32( uiA, uiB, uiC, 0 );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_rem.c b/vendor/riscv-isa-sim/softfloat/f32_rem.c
new file mode 100644
index 00000000..771b1b94
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_rem.c
@@ -0,0 +1,168 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
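f32_mulAdd above hands all three operands to softfloat_mulAddF32, which rounds the exact a*b + c once. The difference from a separate multiply and add is observable, since the unfused form rounds the product first. The sketch below uses C99's fmaf, which is required to behave as a single correctly rounded operation; build it with contraction disabled (for example -ffp-contract=off) so the unfused expression really rounds twice:

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        float a = 1.0f + 0x1p-12f;      /* a*a = 1 + 2^-11 + 2^-24 exactly */
        float c = -(1.0f + 0x1p-11f);

        float unfused = a * a + c;      /* product first rounds to 1 + 2^-11 */
        float fused   = fmaf(a, a, c);  /* one rounding of the exact a*a + c */

        assert(unfused == 0.0f);        /* the 2^-24 tail was rounded away */
        assert(fused   == 0x1p-24f);    /* the fused form keeps it */
        return 0;
    }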
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_rem( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + int_fast16_t expB; + uint_fast32_t sigB; + struct exp16_sig32 normExpSig; + uint32_t rem; + int_fast16_t expDiff; + uint32_t q, recip32, altRem, meanRem; + bool signRem; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | 0x00800000; + sigB |= 0x00800000; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 6; + if ( expDiff ) { + rem <<= 5; + q = 0; + } else { + rem <<= 6; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB<<8 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 7; + expDiff -= 31; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | which is believed to be the maximum possible. 
+ *--------------------------------------------------------------------*/ + sigB <<= 6; + for (;;) { + q = (rem * (uint_fast64_t) recip32)>>32; + if ( expDiff < 0 ) break; + rem = -(q * (uint32_t) sigB); + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -30 here.) + *--------------------------------------------------------------------*/ + q >>= ~expDiff & 31; + rem = (rem<<(expDiff + 30)) - q * (uint32_t) sigB; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & 0x80000000) ); + meanRem = rem + altRem; + if ( (meanRem & 0x80000000) || (! meanRem && (q & 1)) ) rem = altRem; + signRem = signA; + if ( 0x80000000 <= rem ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF32( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f32_roundToInt.c new file mode 100644 index 00000000..0861b840 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_roundToInt.c @@ -0,0 +1,112 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
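f32_roundToInt below rounds in the integer domain: for a biased exponent exp in [0x7F, 0x96), the bit of weight one sits 0x96 - exp = (127 + 23) - exp places above the fraction LSB, so `lastBitMask` marks the integer ULP. Adding half of it and clearing the bits below rounds to nearest, and clearing `lastBitMask` on an exact tie forces the even neighbor. A minimal sketch of the round-to-nearest-even branch, restricted to positive normals with 1.0 <= a < 2^23:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static float round_rne(float a)
    {
        uint32_t u;
        memcpy(&u, &a, sizeof u);
        int exp = (u >> 23) & 0xFF;
        uint32_t lastBitMask = (uint32_t)1 << (0x96 - exp);  /* bit of weight 1 */
        uint32_t roundBitsMask = lastBitMask - 1;
        u += lastBitMask >> 1;                     /* add one half */
        if (!(u & roundBitsMask)) u &= ~lastBitMask;  /* exact tie: force even */
        u &= ~roundBitsMask;                       /* drop the fractional bits */
        memcpy(&a, &u, sizeof a);
        return a;
    }

    int main(void)
    {
        assert(round_rne(2.5f) == 2.0f);   /* tie goes to the even neighbor */
        assert(round_rne(3.5f) == 4.0f);
        assert(round_rne(7.3f) == 7.0f);
        return 0;
    }

Note that a carry out of the fraction field propagates into the exponent field during the add, which is exactly how the masked arithmetic handles values that round up to the next binade.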
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_roundToInt( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t uiZ, lastBitMask, roundBitsMask; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x7E ) { + if ( ! (uint32_t) (uiA<<1) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF32UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF32UI( uiA ) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0x7E ) uiZ |= packToF32UI( 0, 0x7F, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF32UI( 1, 0x7F, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF32UI( 0, 0x7F, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x96 <= exp ) { + if ( (exp == 0xFF) && fracF32UI( uiA ) ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast32_t) 1<<(0x96 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF32UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_sqrt.c b/vendor/riscv-isa-sim/softfloat/f32_sqrt.c new file mode 100644 index 00000000..5ef659e4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_sqrt.c @@ -0,0 +1,121 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_sqrt( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, shiftedSigZ; + uint32_t negRem; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x7F)>>1) + 0x7E; + expA &= 1; + sigA = (sigA | 0x00800000)<<8; + sigZ = + ((uint_fast64_t) sigA * softfloat_approxRecipSqrt32_1( expA, sigA )) + >>32; + if ( expA ) sigZ >>= 1; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZ += 2; + if ( (sigZ & 0x3F) < 2 ) { + shiftedSigZ = sigZ>>2; + negRem = shiftedSigZ * shiftedSigZ; + sigZ &= ~3; + if ( negRem & 0x80000000 ) { + sigZ |= 1; + } else { + if ( negRem ) --sigZ; + } + } + return softfloat_roundPackToF32( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_sub.c b/vendor/riscv-isa-sim/softfloat/f32_sub.c new file mode 100644 index 00000000..d8307381 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_sub.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_sub( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; +#if ! 
defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float32_t (*magsFuncPtr)( uint_fast32_t, uint_fast32_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF32UI( uiA ^ uiB ) ) { + return softfloat_addMagsF32( uiA, uiB ); + } else { + return softfloat_subMagsF32( uiA, uiB ); + } +#else + magsFuncPtr = + signF32UI( uiA ^ uiB ) ? softfloat_addMagsF32 : softfloat_subMagsF32; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_f128.c b/vendor/riscv-isa-sim/softfloat/f32_to_f128.c new file mode 100644 index 00000000..bf519264 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_f128.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
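Whichever dispatch path the INLINE_LEVEL test in f32_sub above selects, the decision rests on one bit: the sign of uiA ^ uiB is set exactly when a and b have opposite signs, and subtracting an opposite-signed operand is an addition of magnitudes, hence softfloat_addMagsF32. A tiny check of that invariant on host floats:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        float a = 2.0f, b = -3.0f;
        uint32_t ua, ub;
        memcpy(&ua, &a, sizeof ua);
        memcpy(&ub, &b, sizeof ub);
        assert(((ua ^ ub) >> 31) == 1);  /* opposite signs: addMags path */
        assert(a - b == 5.0f);           /* |2| + |3| with a's sign */
        return 0;
    }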
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f32_to_f128( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + struct uint128 uiZ; + struct exp16_sig32 normExpSig; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF128UI( &commonNaN ); + } else { + uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ.v0 = 0; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ.v64 = packToF128UI64( sign, 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + normExpSig = softfloat_normSubnormalF32Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ.v64 = packToF128UI64( sign, exp + 0x3F80, (uint_fast64_t) frac<<25 ); + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_f16.c b/vendor/riscv-isa-sim/softfloat/f32_to_f16.c new file mode 100644 index 00000000..7a971589 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_f16.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f32_to_f16( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = frac>>9 | ((frac & 0x1FF) != 0); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF16( sign, exp - 0x71, frac16 | 0x4000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_f64.c b/vendor/riscv-isa-sim/softfloat/f32_to_f64.c new file mode 100644 index 00000000..f9e02f22 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_f64.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
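Widening conversions are exact for normal inputs: f32_to_f64 below re-biases the exponent by 1023 - 127 = 0x380 and left-aligns the 23 fraction bits in the 52-bit field (a shift of 52 - 23 = 29), just as f32_to_f128 earlier used a delta of 0x3F80 = 16383 - 127 and a shift of 25 into the high word. A standalone sketch of the normal-number path, with zeros, subnormals, infinities, and NaNs deliberately out of scope:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static double widen(float a)
    {
        uint32_t u;
        memcpy(&u, &a, sizeof u);
        uint64_t sign = u >> 31;
        uint64_t exp  = (u >> 23) & 0xFF;   /* assume 0 < exp < 0xFF */
        uint64_t frac = u & 0x7FFFFF;
        uint64_t d = sign << 63 | (exp + 0x380) << 52 | frac << 29;
        double z;
        memcpy(&z, &d, sizeof z);
        return z;
    }

    int main(void)
    {
        assert(widen(1.5f)   == 1.5);
        assert(widen(-3.25f) == -3.25);
        return 0;
    }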
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f32_to_f64( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct exp16_sig32 normExpSig; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF32Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) frac<<29 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i16.c b/vendor/riscv-isa-sim/softfloat/f32_to_i16.c new file mode 100644 index 00000000..bde4c76c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i16.c @@ -0,0 +1,57 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast16_t f32_to_i16( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f32_to_i32(a, roundingMode, exact); + + if (sig32 > INT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromPosOverflow; + } else if (sig32 < INT16_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromNegOverflow; + } else { + return sig32; + } +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i32.c b/vendor/riscv-isa-sim/softfloat/f32_to_i32.c new file mode 100644 index 00000000..c9f2cf9b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
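f32_to_i16 above shows the pattern this vendored tree adds for narrow integer targets: reuse the 32-bit conversion, then saturate, replacing whatever flags the inner conversion raised on overflow with just the invalid flag. A minimal sketch of the clamp with the flag bookkeeping stripped, assuming (as on saturating targets such as RISC-V) that i16_fromPosOverflow and i16_fromNegOverflow resolve to the int16_t extremes:

    #include <assert.h>
    #include <stdint.h>

    static int16_t sat_i16(int32_t v)
    {
        if (v > INT16_MAX) return INT16_MAX;
        if (v < INT16_MIN) return INT16_MIN;
        return (int16_t)v;
    }

    int main(void)
    {
        assert(sat_i16( 70000) == INT16_MAX);
        assert(sat_i16(-70000) == INT16_MIN);
        assert(sat_i16(  1234) == 1234);
        return 0;
    }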
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f32_to_i32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c new file mode 100644 index 00000000..1a94dcc6 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
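f32_to_i32 above shifts the significand right by a data-dependent distance with softfloat_shiftRightJam64, which ORs every shifted-out bit into the result's least significant bit ("jamming") so that softfloat_roundToI32 can still see that the value was inexact. A re-implementation sketch matching the upstream primitive (the distance must be nonzero):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t shift_right_jam64(uint64_t a, unsigned dist)
    {
        return (dist < 63) ? a >> dist | ((uint64_t)(a << (-dist & 63)) != 0)
                           : (a != 0);
    }

    int main(void)
    {
        assert(shift_right_jam64(0x100, 4) == 0x10);  /* exact shift: no jam */
        assert(shift_right_jam64(0x101, 4) == 0x11);  /* lost bits stick to the LSB */
        assert(shift_right_jam64(1, 64)    == 1);     /* fully shifted out, still inexact */
        return 0;
    }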
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ int_fast16_t shiftDist;
+ bool sign;
+ int_fast32_t absZ;
+
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ shiftDist = 0x9E - exp;
+ if ( 32 <= shiftDist ) {
+ if ( exact && (exp | sig) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ sign = signF32UI( uiA );
+ if ( shiftDist <= 0 ) {
+ if ( uiA == packToF32UI( 1, 0x9E, 0 ) ) return -0x7FFFFFFF - 1;
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return
+ (exp == 0xFF) && sig ? i32_fromNaN
+ : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+ }
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ sig = (sig | 0x00800000)<<8;
+ absZ = sig>>shiftDist;
+ if ( exact && ((uint_fast32_t) absZ<<shiftDist != sig) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return sign ? -absZ : absZ;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i64.c b/vendor/riscv-isa-sim/softfloat/f32_to_i64.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_to_i64.c
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f32_to_i64( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ int_fast16_t shiftDist;
+#ifdef SOFTFLOAT_FAST_INT64
+ uint_fast64_t sig64, extra;
+ struct uint64_extra sig64Extra;
+#else
+ uint32_t extSig[3];
+#endif
+
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF32UI( uiA );
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ shiftDist = 0xBE - exp;
+ if ( shiftDist < 0 ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return
+ (exp == 0xFF) && sig ? i64_fromNaN
+ : sign ?
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; +#ifdef SOFTFLOAT_FAST_INT64 + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToI64( sign, sig64, extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 2 )] = sig<<8; + extSig[indexWord( 3, 1 )] = 0; + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist ) softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + return softfloat_roundMToI64( sign, extSig, roundingMode, exact ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c new file mode 100644 index 00000000..7d336a47 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c @@ -0,0 +1,94 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
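The _r_minMag entry points, such as f32_to_i64_r_minMag below, always round toward zero regardless of the current rounding mode. For in-range values that matches the semantics of a plain C cast from floating point to an integer type, which also truncates toward zero:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        assert((int64_t)  3.9f ==  3);
        assert((int64_t) -3.9f == -3);
        assert((int64_t)  0.9f ==  0);
        return 0;
    }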
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast64_t sig64;
+    int_fast64_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( 64 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( shiftDist <= 0 ) {
+        if ( uiA == packToF32UI( 1, 0xBE, 0 ) ) {
+            return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+        }
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? i64_fromNaN
+                : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    absZ = sig64>>shiftDist;
+    shiftDist = 40 - shiftDist;
+    if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? -absZ : absZ;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui16.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui16.c
new file mode 100644
index 00000000..073492bf
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui16.c
@@ -0,0 +1,53 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast16_t f32_to_ui16( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    uint_fast8_t old_flags = softfloat_exceptionFlags;
+
+    uint_fast32_t sig32 = f32_to_ui32(a, roundingMode, exact);
+
+    if (sig32 > UINT16_MAX) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return ui16_fromPosOverflow;
+    } else {
+        return sig32;
+    }
+}
diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui32.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui32.c
new file mode 100644
index 00000000..5ec279ba
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui32.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f32_to_ui32( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    uint_fast64_t sig64;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow)
+    if ( (exp == 0xFF) && sig ) {
+#if (ui32_fromNaN == ui32_fromPosOverflow)
+        sign = 0;
+#elif (ui32_fromNaN == ui32_fromNegOverflow)
+        sign = 1;
+#else
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return ui32_fromNaN;
+#endif
+    }
+#endif
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<32;
+    shiftDist = 0xAA - exp;
+    if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist );
+    return softfloat_roundToUI32( sign, sig64, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c
new file mode 100644
index 00000000..12f72619
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x9E - exp;
+    if ( 32 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( sign || (shiftDist < 0) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig | 0x00800000)<<8;
+    z = sig>>shiftDist;
+    if ( exact && (z<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui64.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui64.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui64.c
@@ -0,0 +1,96 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f32_to_ui64( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+#ifdef SOFTFLOAT_FAST_INT64
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+#else
+    uint32_t extSig[3];
+#endif
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( shiftDist < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= 0x00800000;
+#ifdef SOFTFLOAT_FAST_INT64
+    sig64 = (uint_fast64_t) sig<<40;
+    extra = 0;
+    if ( shiftDist ) {
+        sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist );
+        sig64 = sig64Extra.v;
+        extra = sig64Extra.extra;
+    }
+    return softfloat_roundToUI64( sign, sig64, extra, roundingMode, exact );
+#else
+    extSig[indexWord( 3, 2 )] = sig<<8;
+    extSig[indexWord( 3, 1 )] = 0;
+    extSig[indexWord( 3, 0 )] = 0;
+    if ( shiftDist ) softfloat_shiftRightJam96M( extSig, shiftDist, extSig );
+    return softfloat_roundMToUI64( sign, extSig, roundingMode, exact );
+#endif
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c
new file mode 100644
index 00000000..f96f3e1f
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c
@@ -0,0 +1,90 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast64_t sig64, z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( 64 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( sign || (shiftDist < 0) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    z = sig64>>shiftDist;
+    shiftDist = 40 - shiftDist;
+    if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_add.c b/vendor/riscv-isa-sim/softfloat/f64_add.c
new file mode 100644
index 00000000..e9880ddf
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_add.c
@@ -0,0 +1,74 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_add( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2)
+    float64_t (*magsFuncPtr)( uint_fast64_t, uint_fast64_t, bool );
+#endif
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+    if ( signA == signB ) {
+        return softfloat_addMagsF64( uiA, uiB, signA );
+    } else {
+        return softfloat_subMagsF64( uiA, uiB, signA );
+    }
+#else
+    magsFuncPtr =
+        (signA == signB) ? softfloat_addMagsF64 : softfloat_subMagsF64;
+    return (*magsFuncPtr)( uiA, uiB, signA );
+#endif
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_classify.c b/vendor/riscv-isa-sim/softfloat/f64_classify.c
new file mode 100755
index 00000000..180abde3
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_classify.c
@@ -0,0 +1,36 @@
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast16_t f64_classify( float64_t a )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+
+    uA.f = a;
+    uiA = uA.ui;
+
+    uint_fast16_t infOrNaN = expF64UI( uiA ) == 0x7FF;
+    uint_fast16_t subnormalOrZero = expF64UI( uiA ) == 0;
+    bool sign = signF64UI( uiA );
+    bool fracZero = fracF64UI( uiA ) == 0;
+    bool isNaN = isNaNF64UI( uiA );
+    bool isSNaN = softfloat_isSigNaNF64UI( uiA );
+
+    return
+        ( sign && infOrNaN && fracZero ) << 0 |
+        ( sign && !infOrNaN && !subnormalOrZero ) << 1 |
+        ( sign && subnormalOrZero && !fracZero ) << 2 |
+        ( sign && subnormalOrZero && fracZero ) << 3 |
+        ( !sign && infOrNaN && fracZero ) << 7 |
+        ( !sign && !infOrNaN && !subnormalOrZero ) << 6 |
+        ( !sign && subnormalOrZero && !fracZero ) << 5 |
+        ( !sign && subnormalOrZero && fracZero ) << 4 |
+        ( isNaN && isSNaN ) << 8 |
+        ( isNaN && !isSNaN ) << 9;
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_div.c b/vendor/riscv-isa-sim/softfloat/f64_div.c
new file mode 100644
index 00000000..c5a2d4fe
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_div.c
@@ -0,0 +1,172 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_div( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    uint32_t recip32, sig32Z, doubleTerm;
+    uint_fast64_t rem;
+    uint32_t q;
+    uint_fast64_t sigZ;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0x7FF ) {
+            if ( sigB ) goto propagateNaN;
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            if ( ! (expA | sigA) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinite );
+            goto infinity;
+        }
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA - expB + 0x3FE;
+    sigA |= UINT64_C( 0x0010000000000000 );
+    sigB |= UINT64_C( 0x0010000000000000 );
+    if ( sigA < sigB ) {
+        --expZ;
+        sigA <<= 11;
+    } else {
+        sigA <<= 10;
+    }
+    sigB <<= 11;
+    recip32 = softfloat_approxRecip32_1( sigB>>32 ) - 2;
+    sig32Z = ((uint32_t) (sigA>>32) * (uint_fast64_t) recip32)>>32;
+    doubleTerm = sig32Z<<1;
+    rem =
+        ((sigA - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28)
+            - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4);
+    q = (((uint32_t) (rem>>32) * (uint_fast64_t) recip32)>>32) + 4;
+    sigZ = ((uint_fast64_t) sig32Z<<32) + ((uint_fast64_t) q<<4);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( (sigZ & 0x1FF) < 4<<4 ) {
+        q &= ~7;
+        sigZ &= ~(uint_fast64_t) 0x7F;
+        doubleTerm = q<<1;
+        rem =
+            ((rem - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28)
+                - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4);
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            sigZ -= 1<<7;
+        } else {
+            if ( rem ) sigZ |= 1;
+        }
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infinity:
+    uiZ = packToF64UI( signZ, 0x7FF, 0 );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_eq.c b/vendor/riscv-isa-sim/softfloat/f64_eq.c
new file mode 100644
index 00000000..ccb602a3
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_eq.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_eq( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c
new file mode 100644
index 00000000..ee5a4414
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_isSignalingNaN( float64_t a )
+{
+    union ui64_f64 uA;
+
+    uA.f = a;
+    return softfloat_isSigNaNF64UI( uA.ui );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_le.c b/vendor/riscv-isa-sim/softfloat/f64_le.c
new file mode 100644
index 00000000..91fc994a
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_le.c
@@ -0,0 +1,67 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_le( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f64_le_quiet.c
new file mode 100644
index 00000000..a5d332a5
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_le_quiet.c
@@ -0,0 +1,72 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_le_quiet( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_lt.c b/vendor/riscv-isa-sim/softfloat/f64_lt.c
new file mode 100644
index 00000000..abf62fd3
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_lt.c
@@ -0,0 +1,67 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_lt( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c
new file mode 100644
index 00000000..6531f577
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c
@@ -0,0 +1,72 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_lt_quiet( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_mul.c b/vendor/riscv-isa-sim/softfloat/f64_mul.c
new file mode 100644
index 00000000..86f66545
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_mul.c
@@ -0,0 +1,150 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_mul( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signZ;
+    uint_fast64_t magBits;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+#ifdef SOFTFLOAT_FAST_INT64
+    struct uint128 sig128Z;
+#else
+    uint32_t sig128Z[4];
+#endif
+    uint_fast64_t sigZ, uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN;
+        magBits = expB | sigB;
+        goto infArg;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        magBits = expA | sigA;
+        goto infArg;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA + expB - 0x3FF;
+    sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10;
+    sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11;
+#ifdef SOFTFLOAT_FAST_INT64
+    sig128Z = softfloat_mul64To128( sigA, sigB );
+    sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
+#else
+    softfloat_mul64To128M( sigA, sigB, sig128Z );
+    sigZ =
+        (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 | sig128Z[indexWord( 4, 2 )];
+    if ( sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] ) sigZ |= 1;
+#endif
+    if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+        --expZ;
+        sigZ <<= 1;
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infArg:
+    if ( ! magBits ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ = defaultNaNF64UI;
+    } else {
+        uiZ = packToF64UI( signZ, 0x7FF, 0 );
+    }
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f64_mulAdd.c
new file mode 100644
index 00000000..67fc44d3
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_mulAdd.c
@@ -0,0 +1,60 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    union ui64_f64 uC;
+    uint_fast64_t uiC;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    uC.f = c;
+    uiC = uC.ui;
+    return softfloat_mulAddF64( uiA, uiB, uiC, 0 );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_rem.c b/vendor/riscv-isa-sim/softfloat/f64_rem.c
new file mode 100644
index 00000000..e9174554
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_rem.c
@@ -0,0 +1,189 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_rem( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    struct exp16_sig64 normExpSig;
+    uint64_t rem;
+    int_fast16_t expDiff;
+    uint32_t q, recip32;
+    uint_fast64_t q64;
+    uint64_t altRem, meanRem;
+    bool signRem;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN;
+        goto invalid;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        return a;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA < expB - 1 ) return a;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) goto invalid;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    rem = sigA | UINT64_C( 0x0010000000000000 );
+    sigB |= UINT64_C( 0x0010000000000000 );
+    expDiff = expA - expB;
+    if ( expDiff < 1 ) {
+        if ( expDiff < -1 ) return a;
+        sigB <<= 9;
+        if ( expDiff ) {
+            rem <<= 8;
+            q = 0;
+        } else {
+            rem <<= 9;
+            q = (sigB <= rem);
+            if ( q ) rem -= sigB;
+        }
+    } else {
+        recip32 = softfloat_approxRecip32_1( sigB>>21 );
+        /*--------------------------------------------------------------------
+        | Changing the shift of `rem' here requires also changing the initial
+        | subtraction from `expDiff'.
+        *--------------------------------------------------------------------*/
+        rem <<= 9;
+        expDiff -= 30;
+        /*--------------------------------------------------------------------
+        | The scale of `sigB' affects how many bits are obtained during each
+        | cycle of the loop. Currently this is 29 bits per loop iteration,
+        | the maximum possible.
+        *--------------------------------------------------------------------*/
+        sigB <<= 9;
+        for (;;) {
+            q64 = (uint32_t) (rem>>32) * (uint_fast64_t) recip32;
+            if ( expDiff < 0 ) break;
+            q = (q64 + 0x80000000)>>32;
+#ifdef SOFTFLOAT_FAST_INT64
+            rem <<= 29;
+#else
+            rem = (uint_fast64_t) (uint32_t) (rem>>3)<<32;
+#endif
+            rem -= q * (uint64_t) sigB;
+            if ( rem & UINT64_C( 0x8000000000000000 ) ) rem += sigB;
+            expDiff -= 29;
+        }
+        /*--------------------------------------------------------------------
+        | (`expDiff' cannot be less than -29 here.)
+        *--------------------------------------------------------------------*/
+        q = (uint32_t) (q64>>32)>>(~expDiff & 31);
+        rem = (rem<<(expDiff + 30)) - q * (uint64_t) sigB;
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            altRem = rem + sigB;
+            goto selectRem;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    do {
+        altRem = rem;
+        ++q;
+        rem -= sigB;
+    } while ( ! (rem & UINT64_C( 0x8000000000000000 )) );
+ selectRem:
+    meanRem = rem + altRem;
+    if (
+        (meanRem & UINT64_C( 0x8000000000000000 )) || (! meanRem && (q & 1))
+    ) {
+        rem = altRem;
+    }
+    signRem = signA;
+    if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+        signRem = ! signRem;
+        rem = -rem;
+    }
+    return softfloat_normRoundPackToF64( signRem, expB, rem );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f64_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f64_roundToInt.c
new file mode 100644
index 00000000..7f810070
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f64_roundToInt.c
@@ -0,0 +1,112 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of
+California. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_roundToInt( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t uiZ, lastBitMask, roundBitsMask; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x3FE ) { + if ( ! (uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF64UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF64UI( uiA ) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0x3FE ) uiZ |= packToF64UI( 0, 0x3FF, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF64UI( 1, 0x3FF, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF64UI( 0, 0x3FF, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x433 <= exp ) { + if ( (exp == 0x7FF) && fracF64UI( uiA ) ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast64_t) 1<<(0x433 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! 
(uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF64UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_sqrt.c b/vendor/riscv-isa-sim/softfloat/f64_sqrt.c new file mode 100644 index 00000000..9a06cfad --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_sqrt.c @@ -0,0 +1,133 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_sqrt( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig32A, recipSqrt32, sig32Z; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ, shiftedSigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + if ( ! 
signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FF)>>1) + 0x3FE; + expA &= 1; + sigA |= UINT64_C( 0x0010000000000000 ); + sig32A = sigA>>21; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sigA <<= 8; + sig32Z >>= 1; + } else { + sigA <<= 9; + } + rem = sigA - (uint_fast64_t) sig32Z * sig32Z; + q = ((uint32_t) (rem>>2) * (uint_fast64_t) recipSqrt32)>>32; + sigZ = ((uint_fast64_t) sig32Z<<32 | 1<<5) + ((uint_fast64_t) q<<3); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 0x22 ) { + sigZ &= ~(uint_fast64_t) 0x3F; + shiftedSigZ = sigZ>>6; + rem = (sigA<<52) - shiftedSigZ * shiftedSigZ; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + --sigZ; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_sub.c b/vendor/riscv-isa-sim/softfloat/f64_sub.c new file mode 100644 index 00000000..0e990cd1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_sub.c @@ -0,0 +1,74 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
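[Editor's note, not part of the patch] The special-case ladder in f64_sqrt above follows the IEEE 754 rules: NaNs propagate, +inf and signed zeros return unchanged (so the sign of -0.0 survives), and any other negative input raises the invalid flag and produces the default NaN. A small illustrative check of the same semantics against the host C library:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* sqrt(-0.0) keeps the sign of zero; sqrt of any other negative
           value is a quiet NaN (invalid operation). */
        printf("signbit(sqrt(-0.0)) = %d\n", signbit(sqrt(-0.0)) ? 1 : 0); /* 1 */
        printf("isnan(sqrt(-1.0))   = %d\n", isnan(sqrt(-1.0)) ? 1 : 0);   /* 1 */
        printf("sqrt(INFINITY)      = %g\n", sqrt(INFINITY));              /* inf */
        return 0;
    }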
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_sub( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2) + float64_t (*magsFuncPtr)( uint_fast64_t, uint_fast64_t, bool ); +#endif + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) + if ( signA == signB ) { + return softfloat_subMagsF64( uiA, uiB, signA ); + } else { + return softfloat_addMagsF64( uiA, uiB, signA ); + } +#else + magsFuncPtr = + (signA == signB) ? softfloat_subMagsF64 : softfloat_addMagsF64; + return (*magsFuncPtr)( uiA, uiB, signA ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_f128.c b/vendor/riscv-isa-sim/softfloat/f64_to_f128.c new file mode 100644 index 00000000..92c2d560 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_f128.c @@ -0,0 +1,98 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
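[Editor's note, not part of the patch] f64_sub above never subtracts bit patterns directly: it compares the operands' sign bits and dispatches to softfloat_subMagsF64 when they agree, or to softfloat_addMagsF64 when they differ (a - (-b) is an addition of magnitudes). A standalone sketch of that sign test, using memcpy in place of the patch's ui64_f64 union:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Extract the IEEE double sign bit, as signF64UI does in the patch. */
    static int sign_of(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);
        return (int)(bits >> 63);
    }

    int main(void)
    {
        double a = 3.0, b = -2.0;
        /* The same predicate that selects the mags routine in f64_sub. */
        if (sign_of(a) == sign_of(b))
            puts("equal signs: subtract magnitudes");
        else
            puts("unequal signs: add magnitudes");  /* taken for 3.0 - (-2.0) */
        return 0;
    }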
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f64_to_f128( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + struct uint128 uiZ; + struct exp16_sig64 normExpSig; + struct uint128 frac128; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF128UI( &commonNaN ); + } else { + uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ.v0 = 0; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ.v64 = packToF128UI64( sign, 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + normExpSig = softfloat_normSubnormalF64Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac128 = softfloat_shortShiftLeft128( 0, frac, 60 ); + uiZ.v64 = packToF128UI64( sign, exp + 0x3C00, frac128.v64 ); + uiZ.v0 = frac128.v0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_f16.c b/vendor/riscv-isa-sim/softfloat/f64_to_f16.c new file mode 100644 index 00000000..325788c6 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_f16.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f64_to_f16( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = softfloat_shortShiftRightJam64( frac, 38 ); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF16( sign, exp - 0x3F1, frac16 | 0x4000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_f32.c b/vendor/riscv-isa-sim/softfloat/f64_to_f32.c new file mode 100644 index 00000000..99b13dda --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_f32.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f64_to_f32( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + uint_fast32_t uiZ, frac32; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac32 = softfloat_shortShiftRightJam64( frac, 22 ); + if ( ! (exp | frac32) ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF32( sign, exp - 0x381, frac32 | 0x40000000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_i32.c b/vendor/riscv-isa-sim/softfloat/f64_to_i32.c new file mode 100644 index 00000000..8712c0ac --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_i32.c @@ -0,0 +1,82 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f64_to_i32( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0x7FF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToI32( sign, sig, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c new file mode 100644 index 00000000..b7e1e030 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + int_fast32_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( shiftDist < 22 ) { + if ( + sign && (exp == 0x41E) && (sig < UINT64_C( 0x0000000000200000 )) + ) { + if ( exact && sig ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return -0x7FFFFFFF - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? i32_fromNaN + : sign ? 
i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig>>shiftDist; + if ( exact && ((uint_fast64_t) (uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f64_to_i64( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; +#ifdef SOFTFLOAT_FAST_INT64 + struct uint64_extra sigExtra; +#else + uint32_t extSig[3]; +#endif + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x433 - exp; +#ifdef SOFTFLOAT_FAST_INT64 + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sigExtra.v = sig<<-shiftDist; + sigExtra.extra = 0; + } else { + sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist ); + } + return + softfloat_roundToI64( + sign, sigExtra.v, sigExtra.extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sig <<= -shiftDist; + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + } else { + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + } + return softfloat_roundMToI64( sign, extSig, roundingMode, exact ); +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && fracF64UI( uiA ) ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c new file mode 100644 index 00000000..3822606d --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -10 ) { + if ( uiA == packToF64UI( 1, 0x43E, 0 ) ) { + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? i64_fromNaN + : sign ? 
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig<<-shiftDist; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig>>shiftDist; + if ( exact && (absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f64_to_ui32( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0x7FF) && sig ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToUI32( sign, sig, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c new file mode 100644 index 00000000..11f0b050 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
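[Editor's note, not part of the patch] The _r_minMag conversions in this stretch (f64_to_i32_r_minMag, f64_to_i64_r_minMag, and the unsigned variants that follow) all round toward zero, which for in-range values matches a plain C cast; the difference is at the edges, where SoftFloat raises the invalid flag and returns a saturated overflow constant while a native cast is undefined behavior. A brief illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Round toward zero (minimum magnitude), as these casts do. */
        printf("(int32_t)  2.9 -> %d\n", (int32_t)2.9);   /*  2 */
        printf("(int32_t) -2.9 -> %d\n", (int32_t)-2.9);  /* -2 */
        /* Out of range, e.g. (int32_t)3e9, is undefined behavior in C;
           the SoftFloat routines instead raise softfloat_flag_invalid and
           return i32_fromPosOverflow / i32_fromNegOverflow. */
        return 0;
    }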
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( sign || (shiftDist < 21) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? ui32_fromNaN + : sign ? 
ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + z = sig>>shiftDist; + if ( exact && ((uint_fast64_t) z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f64_to_ui64( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; +#ifdef SOFTFLOAT_FAST_INT64 + struct uint64_extra sigExtra; +#else + uint32_t extSig[3]; +#endif + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x433 - exp; +#ifdef SOFTFLOAT_FAST_INT64 + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sigExtra.v = sig<<-shiftDist; + sigExtra.extra = 0; + } else { + sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist ); + } + return + softfloat_roundToUI64( + sign, sigExtra.v, sigExtra.extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sig <<= -shiftDist; + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + } else { + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + } + return softfloat_roundMToUI64( sign, extSig, roundingMode, exact ); +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && fracF64UI( uiA ) ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c new file mode 100644 index 00000000..25918c48 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( sign ) goto invalid; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + z = (sig | UINT64_C( 0x0010000000000000 ))<<-shiftDist; + } else { + sig |= UINT64_C( 0x0010000000000000 ); + z = sig>>shiftDist; + if ( exact && (uint64_t) (sig<<(-shiftDist & 63)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/fall_maxmin.c b/vendor/riscv-isa-sim/softfloat/fall_maxmin.c new file mode 100644 index 00000000..32a9ade5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/fall_maxmin.c @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#define COMPARE_MAX(a, b, bits) \ +float ## bits ## _t f ## bits ## _max( float ## bits ## _t a, float ## bits ## _t b ) \ +{ \ + bool greater = f ## bits ## _lt_quiet(b, a) || \ + (f ## bits ## _eq(b, a) && signF ## bits ## UI(b.v)); \ + \ + if (isNaNF ## bits ## UI(a.v) && isNaNF ## bits ## UI(b.v)) { \ + union ui ## bits ## _f ## bits ui; \ + ui.ui = defaultNaNF ## bits ## UI; \ + return ui.f; \ + } else { \ + return greater || isNaNF ## bits ## UI((b).v) ? a : b; \ + } \ +} + +#define COMPARE_MIN(a, b, bits) \ +float ## bits ## _t f ## bits ## _min( float ## bits ## _t a, float ## bits ## _t b ) \ +{ \ + bool less = f ## bits ## _lt_quiet(a, b) || \ + (f ## bits ## _eq(a, b) && signF ## bits ## UI(a.v)); \ + \ + if (isNaNF ## bits ## UI(a.v) && isNaNF ## bits ## UI(b.v)) { \ + union ui ## bits ## _f ## bits ui; \ + ui.ui = defaultNaNF ## bits ## UI; \ + return ui.f; \ + } else { \ + return less || isNaNF ## bits ## UI((b).v) ? a : b; \ + } \ +} + +COMPARE_MAX(a, b, 16); +COMPARE_MAX(a, b, 32); +COMPARE_MAX(a, b, 64); + +COMPARE_MIN(a, b, 16); +COMPARE_MIN(a, b, 32); +COMPARE_MIN(a, b, 64); diff --git a/vendor/riscv-isa-sim/softfloat/fall_reciprocal.c b/vendor/riscv-isa-sim/softfloat/fall_reciprocal.c new file mode 100644 index 00000000..1c964589 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/fall_reciprocal.c @@ -0,0 +1,392 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +static inline uint64_t extract64(uint64_t val, int pos, int len) +{ + assert(pos >= 0 && len > 0 && len <= 64 - pos); + return (val >> pos) & (~UINT64_C(0) >> (64 - len)); +} + +static inline uint64_t make_mask64(int pos, int len) +{ + assert(pos >= 0 && len > 0 && pos < 64 && len <= 64); + return (UINT64_MAX >> (64 - len)) << pos; +} + +//user needs to truncate output to required length +static inline uint64_t rsqrte7(uint64_t val, int e, int s, bool sub) { + uint64_t exp = extract64(val, s, e); + uint64_t sig = extract64(val, 0, s); + uint64_t sign = extract64(val, s + e, 1); + const int p = 7; + + static const uint8_t table[] = { + 52, 51, 50, 48, 47, 46, 44, 43, + 42, 41, 40, 39, 38, 36, 35, 34, + 33, 32, 31, 30, 30, 29, 28, 27, + 26, 25, 24, 23, 23, 22, 21, 20, + 19, 19, 18, 17, 16, 16, 15, 14, + 14, 13, 12, 12, 11, 10, 10, 9, + 9, 8, 7, 7, 6, 6, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0, + 127, 125, 123, 121, 119, 118, 116, 114, + 113, 111, 109, 108, 106, 105, 103, 102, + 100, 99, 97, 96, 95, 93, 92, 91, + 90, 88, 87, 86, 85, 84, 83, 82, + 80, 79, 78, 77, 76, 75, 74, 73, + 72, 71, 70, 70, 69, 68, 67, 66, + 65, 64, 63, 63, 62, 61, 60, 59, + 59, 58, 57, 56, 56, 55, 54, 53}; + + if (sub) { + while (extract64(sig, s - 1, 1) == 0) + exp--, sig <<= 1; + + sig = (sig << 1) & make_mask64(0 ,s); + } + + int idx = ((exp & 1) << (p-1)) | (sig >> (s-p+1)); + uint64_t out_sig = (uint64_t)(table[idx]) << (s-p); + uint64_t out_exp = (3 * make_mask64(0, e - 1) + ~exp) / 2; + + return (sign << (s+e)) | (out_exp << s) | out_sig; +} + +float16_t f16_rsqrte7(float16_t in) +{ + union ui16_f16 uA; + + uA.f = in; + unsigned int ret = f16_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF16UI; + break; + case 0x008: // -0 + uA.ui = 0xfc00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7c00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + 
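+ /* Intentional fall-through: a positive subnormal sets sub = true and
+ then takes the default arm, where rsqrte7() normalizes the significand
+ before the 7-bit table lookup. */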
default: // +num + uA.ui = rsqrte7(uA.ui, 5, 10, sub); + break; + } + + return uA.f; +} + +float32_t f32_rsqrte7(float32_t in) +{ + union ui32_f32 uA; + + uA.f = in; + unsigned int ret = f32_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF32UI; + break; + case 0x008: // -0 + uA.ui = 0xff800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7f800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + default: // +num + uA.ui = rsqrte7(uA.ui, 8, 23, sub); + break; + } + + return uA.f; +} + +float64_t f64_rsqrte7(float64_t in) +{ + union ui64_f64 uA; + + uA.f = in; + unsigned int ret = f64_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF64UI; + break; + case 0x008: // -0 + uA.ui = 0xfff0000000000000ul; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7ff0000000000000ul; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + default: // +num + uA.ui = rsqrte7(uA.ui, 11, 52, sub); + break; + } + + return uA.f; +} + +//user needs to truncate output to required length +static inline uint64_t recip7(uint64_t val, int e, int s, int rm, bool sub, + bool *round_abnormal) +{ + uint64_t exp = extract64(val, s, e); + uint64_t sig = extract64(val, 0, s); + uint64_t sign = extract64(val, s + e, 1); + const int p = 7; + + static const uint8_t table[] = { + 127, 125, 123, 121, 119, 117, 116, 114, + 112, 110, 109, 107, 105, 104, 102, 100, + 99, 97, 96, 94, 93, 91, 90, 88, + 87, 85, 84, 83, 81, 80, 79, 77, + 76, 75, 74, 72, 71, 70, 69, 68, + 66, 65, 64, 63, 62, 61, 60, 59, + 58, 57, 56, 55, 54, 53, 52, 51, + 50, 49, 48, 47, 46, 45, 44, 43, + 42, 41, 40, 40, 39, 38, 37, 36, + 35, 35, 34, 33, 32, 31, 31, 30, + 29, 28, 28, 27, 26, 25, 25, 24, + 23, 23, 22, 21, 21, 20, 19, 19, + 18, 17, 17, 16, 15, 15, 14, 14, + 13, 12, 12, 11, 11, 10, 9, 9, + 8, 8, 7, 7, 6, 5, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0}; + + if (sub) { + while (extract64(sig, s - 1, 1) == 0) + exp--, sig <<= 1; + + sig = (sig << 1) & make_mask64(0 ,s); + + if (exp != 0 && exp != UINT64_MAX) { + *round_abnormal = true; + if (rm == 1 || + (rm == 2 && !sign) || + (rm == 3 && sign)) + return ((sign << (s+e)) | make_mask64(s, e)) - 1; + else + return (sign << (s+e)) | make_mask64(s, e); + } + } + + int idx = sig >> (s-p); + uint64_t out_sig = (uint64_t)(table[idx]) << (s-p); + uint64_t out_exp = 2 * make_mask64(0, e - 1) + ~exp; + if (out_exp == 0 || out_exp == UINT64_MAX) { + out_sig = (out_sig >> 1) | make_mask64(s - 1, 1); + if (out_exp == UINT64_MAX) { + out_sig >>= 1; + out_exp = 0; + } + } + + return (sign << (s+e)) | (out_exp << s) | out_sig; +} + +float16_t f16_recip7(float16_t in) +{ + union ui16_f16 uA; + + uA.f = in; + unsigned int ret = f16_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x8000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xfc00; + softfloat_exceptionFlags |= 
softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7c00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF16UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 5, 10, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} + +float32_t f32_recip7(float32_t in) +{ + union ui32_f32 uA; + + uA.f = in; + unsigned int ret = f32_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x80000000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xff800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7f800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF32UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 8, 23, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} + +float64_t f64_recip7(float64_t in) +{ + union ui64_f64 uA; + + uA.f = in; + unsigned int ret = f64_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x8000000000000000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xfff0000000000000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7ff0000000000000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF64UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 11, 52, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} diff --git a/vendor/riscv-isa-sim/softfloat/i32_to_f128.c b/vendor/riscv-isa-sim/softfloat/i32_to_f128.c new file mode 100644 index 00000000..af7268ae --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/i32_to_f128.c @@ -0,0 +1,64 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float128_t i32_to_f128( int32_t a )
+{
+    uint_fast64_t uiZ64;
+    bool sign;
+    uint_fast32_t absA;
+    int_fast8_t shiftDist;
+    union ui128_f128 uZ;
+
+    uiZ64 = 0;
+    if ( a ) {
+        sign = (a < 0);
+        absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+        shiftDist = softfloat_countLeadingZeros32( absA ) + 17;
+        uiZ64 =
+            packToF128UI64(
+                sign, 0x402E - shiftDist, (uint_fast64_t) absA<<shiftDist );
+    }
+    uZ.ui.v64 = uiZ64;
+    uZ.ui.v0 = 0;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i32_to_f16.c b/vendor/riscv-isa-sim/softfloat/i32_to_f16.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i32_to_f16.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float16_t i32_to_f16( int32_t a )
+{
+    bool sign;
+    uint_fast32_t absA;
+    int_fast8_t shiftDist;
+    union ui16_f16 u;
+    uint_fast16_t sig;
+
+    sign = (a < 0);
+    absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+    shiftDist = softfloat_countLeadingZeros32( absA ) - 21;
+    if ( 0 <= shiftDist ) {
+        u.ui =
+            a ? packToF16UI(
+                    sign, 0x18 - shiftDist, (uint_fast16_t) absA<<shiftDist )
+                : 0;
+        return u.f;
+    } else {
+        shiftDist += 4;
+        sig =
+            (shiftDist < 0)
+                ? absA>>(-shiftDist)
+                      | ((uint32_t) (absA<<(shiftDist & 31)) != 0)
+                : (uint_fast16_t) absA<<shiftDist;
+        return softfloat_roundPackToF16( sign, 0x1C - shiftDist, sig );
+    }
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i32_to_f32.c b/vendor/riscv-isa-sim/softfloat/i32_to_f32.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i32_to_f32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i32_to_f32( int32_t a )
+{
+    bool sign;
+    union ui32_f32 uZ;
+    uint_fast32_t absA;
+
+    sign = (a < 0);
+    if ( ! (a & 0x7FFFFFFF) ) {
+        uZ.ui = sign ? packToF32UI( 1, 0x9E, 0 ) : 0;
+        return uZ.f;
+    }
+    absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+    return softfloat_normRoundPackToF32( sign, 0x9C, absA );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i32_to_f64.c b/vendor/riscv-isa-sim/softfloat/i32_to_f64.c
new file mode 100644
index 00000000..d3901eb4
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i32_to_f64.c
@@ -0,0 +1,65 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i32_to_f64( int32_t a )
+{
+    uint_fast64_t uiZ;
+    bool sign;
+    uint_fast32_t absA;
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uiZ = 0;
+    } else {
+        sign = (a < 0);
+        absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+        shiftDist = softfloat_countLeadingZeros32( absA ) + 21;
+        uiZ =
+            packToF64UI(
+                sign, 0x432 - shiftDist, (uint_fast64_t) absA<<shiftDist );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i64_to_f128.c b/vendor/riscv-isa-sim/softfloat/i64_to_f128.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i64_to_f128.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float128_t i64_to_f128( int64_t a )
+{
+    uint_fast64_t uiZ64, uiZ0;
+    bool sign;
+    uint_fast64_t absA;
+    int_fast8_t shiftDist;
+    struct uint128 zSig;
+    union ui128_f128 uZ;
+
+    if ( ! a ) {
+        uiZ64 = 0;
+        uiZ0 = 0;
+    } else {
+        sign = (a < 0);
+        absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+        shiftDist = softfloat_countLeadingZeros64( absA ) + 49;
+        if ( 64 <= shiftDist ) {
+            zSig.v64 = absA<<(shiftDist - 64);
+            zSig.v0 = 0;
+        } else {
+            zSig = softfloat_shortShiftLeft128( 0, absA, shiftDist );
+        }
+        uiZ64 = packToF128UI64( sign, 0x406E - shiftDist, zSig.v64 );
+        uiZ0 = zSig.v0;
+    }
+    uZ.ui.v64 = uiZ64;
+    uZ.ui.v0 = uiZ0;
+    return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i64_to_f16.c b/vendor/riscv-isa-sim/softfloat/i64_to_f16.c
new file mode 100644
index 00000000..56f01912
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i64_to_f16.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float16_t i64_to_f16( int64_t a )
+{
+    bool sign;
+    uint_fast64_t absA;
+    int_fast8_t shiftDist;
+    union ui16_f16 u;
+    uint_fast16_t sig;
+
+    sign = (a < 0);
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    shiftDist = softfloat_countLeadingZeros64( absA ) - 53;
+    if ( 0 <= shiftDist ) {
+        u.ui =
+            a ? packToF16UI(
+                    sign, 0x18 - shiftDist, (uint_fast16_t) absA<<shiftDist )
+                : 0;
+        return u.f;
+    } else {
+        shiftDist += 4;
+        sig =
+            (shiftDist < 0)
+                ? softfloat_shortShiftRightJam64( absA, -shiftDist )
+                : (uint_fast16_t) absA<<shiftDist;
+        return softfloat_roundPackToF16( sign, 0x1C - shiftDist, sig );
+    }
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i64_to_f32.c b/vendor/riscv-isa-sim/softfloat/i64_to_f32.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i64_to_f32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i64_to_f32( int64_t a )
+{
+    bool sign;
+    uint_fast64_t absA;
+    int_fast8_t shiftDist;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+
+    sign = (a < 0);
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    shiftDist = softfloat_countLeadingZeros64( absA ) - 40;
+    if ( 0 <= shiftDist ) {
+        u.ui =
+            a ? packToF32UI(
+                    sign, 0x95 - shiftDist, (uint_fast32_t) absA<<shiftDist )
+                : 0;
+        return u.f;
+    } else {
+        shiftDist += 7;
+        sig =
+            (shiftDist < 0)
+                ? softfloat_shortShiftRightJam64( absA, -shiftDist )
+                : (uint_fast32_t) absA<<shiftDist;
+        return softfloat_roundPackToF32( sign, 0x9C - shiftDist, sig );
+    }
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/i64_to_f64.c b/vendor/riscv-isa-sim/softfloat/i64_to_f64.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/i64_to_f64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i64_to_f64( int64_t a )
+{
+    bool sign;
+    union ui64_f64 uZ;
+    uint_fast64_t absA;
+
+    sign = (a < 0);
+    if ( ! (a & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) {
+        uZ.ui = sign ? packToF64UI( 1, 0x43E, 0 ) : 0;
+        return uZ.f;
+    }
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    return softfloat_normRoundPackToF64( sign, 0x43C, absA );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/internals.h b/vendor/riscv-isa-sim/softfloat/internals.h
new file mode 100644
index 00000000..55585e96
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/internals.h
@@ -0,0 +1,286 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef internals_h
+#define internals_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "primitives.h"
+#include "softfloat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+union ui16_f16 { uint16_t ui; float16_t f; };
+union ui32_f32 { uint32_t ui; float32_t f; };
+union ui64_f64 { uint64_t ui; float64_t f; };
+
+#ifdef SOFTFLOAT_FAST_INT64
+union extF80M_extF80 { struct extFloat80M fM; extFloat80_t f; };
+union ui128_f128 { struct uint128 ui; float128_t f; };
+#endif
+
+enum {
+    softfloat_mulAdd_subC    = 1,
+    softfloat_mulAdd_subProd = 2
+};
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_roundToUI32( bool, uint_fast64_t, uint_fast8_t, bool );
+
+#ifdef SOFTFLOAT_FAST_INT64
+uint_fast64_t
+ softfloat_roundToUI64(
+     bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool );
+#else
+uint_fast64_t softfloat_roundMToUI64( bool, uint32_t *, uint_fast8_t, bool );
+#endif
+
+int_fast32_t softfloat_roundToI32( bool, uint_fast64_t, uint_fast8_t, bool );
+
+#ifdef SOFTFLOAT_FAST_INT64
+int_fast64_t
+ softfloat_roundToI64(
+     bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool );
+#else
+int_fast64_t softfloat_roundMToI64( bool, uint32_t *, uint_fast8_t, bool );
+#endif
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF16UI( a ) ((bool) ((uint16_t) (a)>>15))
+#define expF16UI( a ) ((int_fast8_t) ((a)>>10) & 0x1F)
+#define fracF16UI( a ) ((a) & 0x03FF)
+#define packToF16UI( sign, exp, sig ) (((uint16_t) (sign)<<15) + ((uint16_t) (exp)<<10) + (sig))
+
+#define isNaNF16UI( a ) (((~(a) & 0x7C00) == 0) && ((a) & 0x03FF))
+
+struct exp8_sig16 { int_fast8_t exp; uint_fast16_t sig; };
+struct exp8_sig16 softfloat_normSubnormalF16Sig( uint_fast16_t );
+
+float16_t softfloat_roundPackToF16( bool, int_fast16_t, uint_fast16_t );
+float16_t softfloat_normRoundPackToF16( bool, int_fast16_t, uint_fast16_t );
+
+float16_t softfloat_addMagsF16( uint_fast16_t, uint_fast16_t );
+float16_t softfloat_subMagsF16( uint_fast16_t, uint_fast16_t );
+float16_t
+ softfloat_mulAddF16(
+     uint_fast16_t, uint_fast16_t, uint_fast16_t, uint_fast8_t );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF32UI( a ) ((bool) ((uint32_t) (a)>>31))
+#define expF32UI( a ) ((int_fast16_t) ((a)>>23) & 0xFF)
+#define fracF32UI( a ) ((a) & 0x007FFFFF)
+#define packToF32UI( sign, exp, sig ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<23) +
(sig)) + +#define isNaNF32UI( a ) (((~(a) & 0x7F800000) == 0) && ((a) & 0x007FFFFF)) + +struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; }; +struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t ); + +float32_t softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t ); +float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, uint_fast32_t ); + +float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t + softfloat_mulAddF32( + uint_fast32_t, uint_fast32_t, uint_fast32_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF64UI( a ) ((bool) ((uint64_t) (a)>>63)) +#define expF64UI( a ) ((int_fast16_t) ((a)>>52) & 0x7FF) +#define fracF64UI( a ) ((a) & UINT64_C( 0x000FFFFFFFFFFFFF )) +#define packToF64UI( sign, exp, sig ) ((uint64_t) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<52) + (sig))) + +#define isNaNF64UI( a ) (((~(a) & UINT64_C( 0x7FF0000000000000 )) == 0) && ((a) & UINT64_C( 0x000FFFFFFFFFFFFF ))) + +struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; }; +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t ); + +float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t ); +float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t ); + +float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t + softfloat_mulAddF64( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signExtF80UI64( a64 ) ((bool) ((uint16_t) (a64)>>15)) +#define expExtF80UI64( a64 ) ((a64) & 0x7FFF) +#define packToExtF80UI64( sign, exp ) ((uint_fast16_t) (sign)<<15 | (exp)) + +#define isNaNExtF80UI( a64, a0 ) ((((a64) & 0x7FFF) == 0x7FFF) && ((a0) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + +#ifdef SOFTFLOAT_FAST_INT64 + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ + +struct exp32_sig64 { int_fast32_t exp; uint64_t sig; }; +struct exp32_sig64 softfloat_normSubnormalExtF80Sig( uint_fast64_t ); + +extFloat80_t + softfloat_roundPackToExtF80( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); +extFloat80_t + softfloat_normRoundPackToExtF80( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); + +extFloat80_t + softfloat_addMagsExtF80( + uint_fast16_t, uint_fast64_t, uint_fast16_t, uint_fast64_t, bool ); +extFloat80_t + softfloat_subMagsExtF80( + uint_fast16_t, uint_fast64_t, uint_fast16_t, uint_fast64_t, bool ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF128UI64( a64 ) ((bool) ((uint64_t) (a64)>>63)) +#define expF128UI64( a64 ) ((int_fast32_t) ((a64)>>48) & 0x7FFF) +#define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )) +#define packToF128UI64( sign, exp, sig64 ) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<48) + (sig64)) + +#define isNaNF128UI( a64, a0 ) (((~(a64) & UINT64_C( 0x7FFF000000000000 )) == 0) && (a0 || ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )))) 
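
A quick illustration (an editorial sketch, not part of the vendored header or this patch): the field macros above are plain shifts and masks, so an IEEE bit pattern can be assembled and decomposed without any floating-point arithmetic. A minimal round trip through the binary64 macros, assuming only what this header defines; the demo function name is hypothetical:

    #include <assert.h>
    #include <stdint.h>

    /* 1.5 has sign 0, biased exponent 0x3FF, fraction 0x0008000000000000. */
    static void f64_field_macro_demo( void )
    {
        uint64_t ui = packToF64UI( 0, 0x3FF, UINT64_C( 0x0008000000000000 ) );
        assert( ui == UINT64_C( 0x3FF8000000000000 ) );  /* the bits of 1.5 */
        assert( ! signF64UI( ui ) );
        assert( expF64UI( ui ) == 0x3FF );
        assert( fracF64UI( ui ) == UINT64_C( 0x0008000000000000 ) );
        assert( ! isNaNF64UI( ui ) );                    /* exponent not all ones */
    }
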
+ +struct exp32_sig128 { int_fast32_t exp; struct uint128 sig; }; +struct exp32_sig128 + softfloat_normSubnormalF128Sig( uint_fast64_t, uint_fast64_t ); + +float128_t + softfloat_roundPackToF128( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t, uint_fast64_t ); +float128_t + softfloat_normRoundPackToF128( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t ); + +float128_t + softfloat_addMagsF128( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +float128_t + softfloat_subMagsF128( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +float128_t + softfloat_mulAddF128( + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast8_t + ); + +#else + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ + +bool + softfloat_tryPropagateNaNExtF80M( + const struct extFloat80M *, + const struct extFloat80M *, + struct extFloat80M * + ); +void softfloat_invalidExtF80M( struct extFloat80M * ); + +int softfloat_normExtF80SigM( uint64_t * ); + +void + softfloat_roundPackMToExtF80M( + bool, int32_t, uint32_t *, uint_fast8_t, struct extFloat80M * ); +void + softfloat_normRoundPackMToExtF80M( + bool, int32_t, uint32_t *, uint_fast8_t, struct extFloat80M * ); + +void + softfloat_addExtF80M( + const struct extFloat80M *, + const struct extFloat80M *, + struct extFloat80M *, + bool + ); + +int + softfloat_compareNonnormExtF80M( + const struct extFloat80M *, const struct extFloat80M * ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF128UI96( a96 ) ((bool) ((uint32_t) (a96)>>31)) +#define expF128UI96( a96 ) ((int32_t) ((a96)>>16) & 0x7FFF) +#define fracF128UI96( a96 ) ((a96) & 0x0000FFFF) +#define packToF128UI96( sign, exp, sig96 ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<16) + (sig96)) + +bool softfloat_isNaNF128M( const uint32_t * ); + +bool + softfloat_tryPropagateNaNF128M( + const uint32_t *, const uint32_t *, uint32_t * ); +void softfloat_invalidF128M( uint32_t * ); + +int softfloat_shiftNormSigF128M( const uint32_t *, uint_fast8_t, uint32_t * ); + +void softfloat_roundPackMToF128M( bool, int32_t, uint32_t *, uint32_t * ); +void softfloat_normRoundPackMToF128M( bool, int32_t, uint32_t *, uint32_t * ); + +void + softfloat_addF128M( const uint32_t *, const uint32_t *, uint32_t *, bool ); +void + softfloat_mulAddF128M( + const uint32_t *, + const uint32_t *, + const uint32_t *, + uint32_t *, + uint_fast8_t + ); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/platform.h b/vendor/riscv-isa-sim/softfloat/platform.h new file mode 100644 index 00000000..55de1941 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/platform.h @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#include "config.h" +#ifndef WORDS_BIGENDIAN +#define LITTLEENDIAN 1 +#endif + +#define INLINE_LEVEL 5 +#define SOFTFLOAT_FAST_INT64 +#define SOFTFLOAT_FAST_DIV64TO32 +#define SOFTFLOAT_ROUND_ODD + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define INLINE static inline + diff --git a/vendor/riscv-isa-sim/softfloat/primitiveTypes.h b/vendor/riscv-isa-sim/softfloat/primitiveTypes.h new file mode 100644 index 00000000..b1120491 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/primitiveTypes.h @@ -0,0 +1,86 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef primitiveTypes_h
+#define primitiveTypes_h 1
+
+#include <stdint.h>
+#include "platform.h"
+
+#ifdef SOFTFLOAT_FAST_INT64
+
+#ifdef LITTLEENDIAN
+struct uint128 { uint64_t v0, v64; };
+struct uint64_extra { uint64_t extra, v; };
+struct uint128_extra { uint64_t extra; struct uint128 v; };
+#else
+struct uint128 { uint64_t v64, v0; };
+struct uint64_extra { uint64_t v, extra; };
+struct uint128_extra { struct uint128 v; uint64_t extra; };
+#endif
+
+#endif
+
+/*----------------------------------------------------------------------------
+| These macros are used to isolate the differences in word order between big-
+| endian and little-endian platforms.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+#define wordIncr 1
+#define indexWord( total, n ) (n)
+#define indexWordHi( total ) ((total) - 1)
+#define indexWordLo( total ) 0
+#define indexMultiword( total, m, n ) (n)
+#define indexMultiwordHi( total, n ) ((total) - (n))
+#define indexMultiwordLo( total, n ) 0
+#define indexMultiwordHiBut( total, n ) (n)
+#define indexMultiwordLoBut( total, n ) 0
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }
+#else
+#define wordIncr -1
+#define indexWord( total, n ) ((total) - 1 - (n))
+#define indexWordHi( total ) 0
+#define indexWordLo( total ) ((total) - 1)
+#define indexMultiword( total, m, n ) ((total) - 1 - (m))
+#define indexMultiwordHi( total, n ) 0
+#define indexMultiwordLo( total, n ) ((total) - (n))
+#define indexMultiwordHiBut( total, n ) 0
+#define indexMultiwordLoBut( total, n ) (n)
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }
+#endif
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/primitives.h b/vendor/riscv-isa-sim/softfloat/primitives.h
new file mode 100644
index 00000000..1acc8a8a
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/primitives.h
@@ -0,0 +1,1168 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef primitives_h
+#define primitives_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "primitiveTypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef softfloat_shortShiftRightJam64
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must be in
+| the range 1 to 63. If any nonzero bits are shifted off, they are "jammed"
+| into the least-significant bit of the shifted value by setting the least-
+| significant bit to 1. This shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist )
+    { return a>>dist | ((a & (((uint_fast64_t) 1<<dist) - 1)) != 0); }
+#else
+uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shiftRightJam32
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero. If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1. This shifted-and-jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 32, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist )
+{
+    return
+        (dist < 31) ? a>>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0);
+}
+#else
+uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shiftRightJam64
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero. If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1. This shifted-and-jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 64, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist )
+{
+    return
+        (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0);
+}
+#else
+uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist );
+#endif
+#endif
+
+/*----------------------------------------------------------------------------
+| A constant table that translates an 8-bit unsigned integer (the array index)
+| into the number of leading 0 bits before the most-significant 1 of that
+| integer. For integer zero (index 0), the corresponding table element is 8.
+*----------------------------------------------------------------------------*/
+extern const uint_least8_t softfloat_countLeadingZeros8[256];
+
+#ifndef softfloat_countLeadingZeros16
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'. If 'a' is zero, 16 is returned.
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ) +{ + uint_fast8_t count = 8; + if ( 0x100 <= a ) { + count = 0; + a >>= 8; + } + count += softfloat_countLeadingZeros8[a]; + return count; +} +#else +uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ); +#endif +#endif + +#ifndef softfloat_countLeadingZeros32 +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 32 is returned. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL) +INLINE uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ) +{ + uint_fast8_t count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[a>>24]; + return count; +} +#else +uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ); +#endif +#endif + +#ifndef softfloat_countLeadingZeros64 +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 64 is returned. +*----------------------------------------------------------------------------*/ +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ); +#endif + +extern const uint16_t softfloat_approxRecip_1k0s[16]; +extern const uint16_t softfloat_approxRecip_1k1s[16]; + +#ifndef softfloat_approxRecip32_1 +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the number represented by 'a', +| where 'a' is interpreted as an unsigned fixed-point number with one integer +| bit and 31 fraction bits. The 'a' input must be "normalized", meaning that +| its most-significant bit (bit 31) must be 1. Thus, if A is the value of +| the fixed-point interpretation of 'a', then 1 <= A < 2. The returned value +| is interpreted as a pure unsigned fraction, having no integer bits and 32 +| fraction bits. The approximation returned is never greater than the true +| reciprocal 1/A, and it differs from the true reciprocal by at most 2.006 ulp +| (units in the last place). +*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_DIV64TO32 +#define softfloat_approxRecip32_1( a ) ((uint32_t) (UINT64_C( 0x7FFFFFFFFFFFFFFF ) / (uint32_t) (a))) +#else +uint32_t softfloat_approxRecip32_1( uint32_t a ); +#endif +#endif + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[16]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[16]; + +#ifndef softfloat_approxRecipSqrt32_1 +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the square root of the number +| represented by 'a', where 'a' is interpreted as an unsigned fixed-point +| number either with one integer bit and 31 fraction bits or with two integer +| bits and 30 fraction bits. The format of 'a' is determined by 'oddExpA', +| which must be either 0 or 1. If 'oddExpA' is 1, 'a' is interpreted as +| having one integer bit, and if 'oddExpA' is 0, 'a' is interpreted as having +| two integer bits. The 'a' input must be "normalized", meaning that its +| most-significant bit (bit 31) must be 1. 
Thus, if A is the value of the +| fixed-point interpretation of 'a', it follows that 1 <= A < 2 when 'oddExpA' +| is 1, and 2 <= A < 4 when 'oddExpA' is 0. +| The returned value is interpreted as a pure unsigned fraction, having +| no integer bits and 32 fraction bits. The approximation returned is never +| greater than the true reciprocal 1/sqrt(A), and it differs from the true +| reciprocal by at most 2.06 ulp (units in the last place). The approximation +| returned is also always within the range 0.5 to 1; thus, the most- +| significant bit of the result is always set. +*----------------------------------------------------------------------------*/ +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ); +#endif + +#ifdef SOFTFLOAT_FAST_INT64 + +/*---------------------------------------------------------------------------- +| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is +| defined. +*----------------------------------------------------------------------------*/ + +#ifndef softfloat_eq128 +/*---------------------------------------------------------------------------- +| Returns true if the 128-bit unsigned integer formed by concatenating 'a64' +| and 'a0' is equal to the 128-bit unsigned integer formed by concatenating +| 'b64' and 'b0'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) +INLINE +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return (a64 == b64) && (a0 == b0); } +#else +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_le128 +/*---------------------------------------------------------------------------- +| Returns true if the 128-bit unsigned integer formed by concatenating 'a64' +| and 'a0' is less than or equal to the 128-bit unsigned integer formed by +| concatenating 'b64' and 'b0'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return (a64 < b64) || ((a64 == b64) && (a0 <= b0)); } +#else +bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_lt128 +/*---------------------------------------------------------------------------- +| Returns true if the 128-bit unsigned integer formed by concatenating 'a64' +| and 'a0' is less than the 128-bit unsigned integer formed by concatenating +| 'b64' and 'b0'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return (a64 < b64) || ((a64 == b64) && (a0 < b0)); } +#else +bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_shortShiftLeft128 +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' left by the +| number of bits given in 'dist', which must be in the range 1 to 63. 
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    struct uint128 z;
+    z.v64 = a64<<dist | a0>>(-dist & 63);
+    z.v0 = a0<<dist;
+    return z;
+}
+#else
+struct uint128
+ softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRight128
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_shortShiftRight128( uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    struct uint128 z;
+    z.v64 = a64>>dist;
+    z.v0 = a64<<(-dist & 63) | a0>>dist;
+    return z;
+}
+#else
+struct uint128
+ softfloat_shortShiftRight128( uint64_t a64, uint64_t a0, uint_fast8_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRightJam64Extra
+/*----------------------------------------------------------------------------
+| This function is the same as 'softfloat_shiftRightJam64Extra' (below),
+| except that 'dist' must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint64_extra
+ softfloat_shortShiftRightJam64Extra(
+     uint64_t a, uint64_t extra, uint_fast8_t dist )
+{
+    struct uint64_extra z;
+    z.v = a>>dist;
+    z.extra = a<<(-dist & 63) | (extra != 0);
+    return z;
+}
+#else
+struct uint64_extra
+ softfloat_shortShiftRightJam64Extra(
+     uint64_t a, uint64_t extra, uint_fast8_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRightJam128
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must be in the range 1 to 63. If any
+| nonzero bits are shifted off, they are "jammed" into the least-significant
+| bit of the shifted value by setting the least-significant bit to 1. This
+| shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_shortShiftRightJam128(
+     uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    uint_fast8_t negDist = -dist;
+    struct uint128 z;
+    z.v64 = a64>>dist;
+    z.v0 =
+        a64<<(negDist & 63) | a0>>dist
+            | ((uint64_t) (a0<<(negDist & 63)) != 0);
+    return z;
+}
+#else
+struct uint128
+ softfloat_shortShiftRightJam128(
+     uint64_t a64, uint64_t a0, uint_fast8_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftRightJam128Extra
+/*----------------------------------------------------------------------------
+| This function is the same as 'softfloat_shiftRightJam128Extra' (below),
+| except that 'dist' must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE
+struct uint128_extra
+ softfloat_shortShiftRightJam128Extra(
+     uint64_t a64, uint64_t a0, uint64_t extra, uint_fast8_t dist )
+{
+    uint_fast8_t negDist = -dist;
+    struct uint128_extra z;
+    z.v.v64 = a64>>dist;
+    z.v.v0 = a64<<(negDist & 63) | a0>>dist;
+    z.extra = a0<<(negDist & 63) | (extra != 0);
+    return z;
+}
+#else
+struct uint128_extra
+ softfloat_shortShiftRightJam128Extra(
+     uint64_t a64, uint64_t a0, uint64_t extra, uint_fast8_t dist );
+#endif
+#endif
+
+#ifndef softfloat_shiftRightJam64Extra
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a' and 'extra' right by 64
+| _plus_ the number of bits given in 'dist', which must not be zero. This
+| shifted value is at most 64 nonzero bits and is returned in the 'v' field
+| of the 'struct uint64_extra' result.
The 64-bit 'extra' field of the result +| contains a value formed as follows from the bits that were shifted off: The +| _last_ bit shifted off is the most-significant bit of the 'extra' field, and +| the other 63 bits of the 'extra' field are all zero if and only if _all_but_ +| _the_last_ bits shifted off were all zero. +| (This function makes more sense if 'a' and 'extra' are considered to form +| an unsigned fixed-point number with binary point between 'a' and 'extra'. +| This fixed-point value is shifted right by the number of bits given in +| 'dist', and the integer part of this shifted value is returned in the 'v' +| field of the result. The fractional part of the shifted value is modified +| as described above and returned in the 'extra' field of the result.) +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (4 <= INLINE_LEVEL) +INLINE +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ) +{ + struct uint64_extra z; + if ( dist < 64 ) { + z.v = a>>dist; + z.extra = a<<(-dist & 63); + } else { + z.v = 0; + z.extra = (dist == 64) ? a : (a != 0); + } + z.extra |= (extra != 0); + return z; +} +#else +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ); +#endif +#endif + +#ifndef softfloat_shiftRightJam128 +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the +| number of bits given in 'dist', which must not be zero. If any nonzero bits +| are shifted off, they are "jammed" into the least-significant bit of the +| shifted value by setting the least-significant bit to 1. This shifted-and- +| jammed value is returned. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than 128, the result will be either 0 or 1, depending on whether the +| original 128 bits are all zeros. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ); +#endif + +#ifndef softfloat_shiftRightJam128Extra +/*---------------------------------------------------------------------------- +| Shifts the 192 bits formed by concatenating 'a64', 'a0', and 'extra' right +| by 64 _plus_ the number of bits given in 'dist', which must not be zero. +| This shifted value is at most 128 nonzero bits and is returned in the 'v' +| field of the 'struct uint128_extra' result. The 64-bit 'extra' field of the +| result contains a value formed as follows from the bits that were shifted +| off: The _last_ bit shifted off is the most-significant bit of the 'extra' +| field, and the other 63 bits of the 'extra' field are all zero if and only +| if _all_but_the_last_ bits shifted off were all zero. +| (This function makes more sense if 'a64', 'a0', and 'extra' are considered +| to form an unsigned fixed-point number with binary point between 'a0' and +| 'extra'. This fixed-point value is shifted right by the number of bits +| given in 'dist', and the integer part of this shifted value is returned +| in the 'v' field of the result. The fractional part of the shifted value +| is modified as described above and returned in the 'extra' field of the +| result.) 
+*----------------------------------------------------------------------------*/ +struct uint128_extra + softfloat_shiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast32_t dist ); +#endif + +#ifndef softfloat_shiftRightJam256M +/*---------------------------------------------------------------------------- +| Shifts the 256-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', which must not be zero. If any nonzero bits are +| shifted off, they are "jammed" into the least-significant bit of the shifted +| value by setting the least-significant bit to 1. This shifted-and-jammed +| value is stored at the location pointed to by 'zPtr'. Each of 'aPtr' and +| 'zPtr' points to an array of four 64-bit elements that concatenate in the +| platform's normal endian order to form a 256-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' +| is greater than 256, the stored result will be either 0 or 1, depending on +| whether the original 256 bits are all zeros. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftRightJam256M( + const uint64_t *aPtr, uint_fast32_t dist, uint64_t *zPtr ); +#endif + +#ifndef softfloat_add128 +/*---------------------------------------------------------------------------- +| Returns the sum of the 128-bit integer formed by concatenating 'a64' and +| 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. The +| addition is modulo 2^128, so any carry out is lost. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 + b0; + z.v64 = a64 + b64 + (z.v0 < a0); + return z; +} +#else +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_add256M +/*---------------------------------------------------------------------------- +| Adds the two 256-bit integers pointed to by 'aPtr' and 'bPtr'. The addition +| is modulo 2^256, so any carry out is lost. The sum is stored at the +| location pointed to by 'zPtr'. Each of 'aPtr', 'bPtr', and 'zPtr' points to +| an array of four 64-bit elements that concatenate in the platform's normal +| endian order to form a 256-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_add256M( + const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr ); +#endif + +#ifndef softfloat_sub128 +/*---------------------------------------------------------------------------- +| Returns the difference of the 128-bit integer formed by concatenating 'a64' +| and 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. +| The subtraction is modulo 2^128, so any borrow out (carry out) is lost. 
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+struct uint128
+ softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 - b0;
+    z.v64 = a64 - b64;
+    z.v64 -= (a0 < b0);
+    return z;
+}
+#else
+struct uint128
+ softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 );
+#endif
+#endif
+
+#ifndef softfloat_sub256M
+/*----------------------------------------------------------------------------
+| Subtracts the 256-bit integer pointed to by 'bPtr' from the 256-bit integer
+| pointed to by 'aPtr'. The subtraction is modulo 2^256, so any borrow out
+| (carry out) is lost. The difference is stored at the location pointed to
+| by 'zPtr'. Each of 'aPtr', 'bPtr', and 'zPtr' points to an array of four
+| 64-bit elements that concatenate in the platform's normal endian order to
+| form a 256-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_sub256M(
+     const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr );
+#endif
+
+#ifndef softfloat_mul64ByShifted32To128
+/*----------------------------------------------------------------------------
+| Returns the 128-bit product of 'a', 'b', and 2^32.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL)
+INLINE struct uint128 softfloat_mul64ByShifted32To128( uint64_t a, uint32_t b )
+{
+    uint_fast64_t mid;
+    struct uint128 z;
+    mid = (uint_fast64_t) (uint32_t) a * b;
+    z.v0 = mid<<32;
+    z.v64 = (uint_fast64_t) (uint32_t) (a>>32) * b + (mid>>32);
+    return z;
+}
+#else
+struct uint128 softfloat_mul64ByShifted32To128( uint64_t a, uint32_t b );
+#endif
+#endif
+
+#ifndef softfloat_mul64To128
+/*----------------------------------------------------------------------------
+| Returns the 128-bit product of 'a' and 'b'.
+*----------------------------------------------------------------------------*/
+struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b );
+#endif
+
+#ifndef softfloat_mul128By32
+/*----------------------------------------------------------------------------
+| Returns the product of the 128-bit integer formed by concatenating 'a64' and
+| 'a0', multiplied by 'b'. The multiplication is modulo 2^128; any overflow
+| bits are discarded.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (4 <= INLINE_LEVEL)
+INLINE
+struct uint128 softfloat_mul128By32( uint64_t a64, uint64_t a0, uint32_t b )
+{
+    struct uint128 z;
+    uint_fast64_t mid;
+    uint_fast32_t carry;
+    z.v0 = a0 * b;
+    mid = (uint_fast64_t) (uint32_t) (a0>>32) * b;
+    carry = (uint32_t) ((uint_fast32_t) (z.v0>>32) - (uint_fast32_t) mid);
+    z.v64 = a64 * b + (uint_fast32_t) ((mid + carry)>>32);
+    return z;
+}
+#else
+struct uint128 softfloat_mul128By32( uint64_t a64, uint64_t a0, uint32_t b );
+#endif
+#endif
+
+#ifndef softfloat_mul128To256M
+/*----------------------------------------------------------------------------
+| Multiplies the 128-bit unsigned integer formed by concatenating 'a64' and
+| 'a0' by the 128-bit unsigned integer formed by concatenating 'b64' and
+| 'b0'. The 256-bit product is stored at the location pointed to by 'zPtr'.
+| Argument 'zPtr' points to an array of four 64-bit elements that concatenate
+| in the platform's normal endian order to form a 256-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_mul128To256M(
+     uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0, uint64_t *zPtr );
+#endif
+
+#else
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is not
+| defined.
+*----------------------------------------------------------------------------*/
+
+#ifndef softfloat_compare96M
+/*----------------------------------------------------------------------------
+| Compares the two 96-bit unsigned integers pointed to by 'aPtr' and 'bPtr'.
+| Returns -1 if the first integer (A) is less than the second (B); returns 0
+| if the two integers are equal; and returns +1 if the first integer (A)
+| is greater than the second (B). (The result is thus the signum of A - B.)
+| Each of 'aPtr' and 'bPtr' points to an array of three 32-bit elements that
+| concatenate in the platform's normal endian order to form a 96-bit integer.
+*----------------------------------------------------------------------------*/
+int_fast8_t softfloat_compare96M( const uint32_t *aPtr, const uint32_t *bPtr );
+#endif
+
+#ifndef softfloat_compare128M
+/*----------------------------------------------------------------------------
+| Compares the two 128-bit unsigned integers pointed to by 'aPtr' and 'bPtr'.
+| Returns -1 if the first integer (A) is less than the second (B); returns 0
+| if the two integers are equal; and returns +1 if the first integer (A)
+| is greater than the second (B). (The result is thus the signum of A - B.)
+| Each of 'aPtr' and 'bPtr' points to an array of four 32-bit elements that
+| concatenate in the platform's normal endian order to form a 128-bit integer.
+*----------------------------------------------------------------------------*/
+int_fast8_t
+ softfloat_compare128M( const uint32_t *aPtr, const uint32_t *bPtr );
+#endif
+
+#ifndef softfloat_shortShiftLeft64To96M
+/*----------------------------------------------------------------------------
+| Extends 'a' to 96 bits and shifts the value left by the number of bits given
+| in 'dist', which must be in the range 1 to 31. The result is stored at the
+| location pointed to by 'zPtr'. Argument 'zPtr' points to an array of three
+| 32-bit elements that concatenate in the platform's normal endian order to
+| form a 96-bit integer.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL)
+INLINE
+void
+ softfloat_shortShiftLeft64To96M(
+     uint64_t a, uint_fast8_t dist, uint32_t *zPtr )
+{
+    zPtr[indexWord( 3, 0 )] = (uint32_t) a<<dist;
+    a >>= 32 - dist;
+    zPtr[indexWord( 3, 2 )] = a>>32;
+    zPtr[indexWord( 3, 1 )] = a;
+}
+#else
+void
+ softfloat_shortShiftLeft64To96M(
+     uint64_t a, uint_fast8_t dist, uint32_t *zPtr );
+#endif
+#endif
+
+#ifndef softfloat_shortShiftLeftM
+/*----------------------------------------------------------------------------
+| Shifts the N-bit unsigned integer pointed to by 'aPtr' left by the number
+| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist'
+| must be in the range 1 to 31. Any nonzero bits shifted off are lost. The
+| shifted N-bit result is stored at the location pointed to by 'zPtr'. Each
+| of 'aPtr' and 'zPtr' points to a 'size_words'-long array of 32-bit elements
+| that concatenate in the platform's normal endian order to form an N-bit
+| integer.
+*----------------------------------------------------------------------------*/ +void + softfloat_shortShiftLeftM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shortShiftLeft96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftLeftM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftLeft96M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftLeft128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftLeftM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftLeft128M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftLeft160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftLeftM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftLeft160M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftLeftM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' left by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must not be zero. Any nonzero bits shifted off are lost. The shifted +| N-bit result is stored at the location pointed to by 'zPtr'. Each of 'aPtr' +| and 'zPtr' points to a 'size_words'-long array of 32-bit elements that +| concatenate in the platform's normal endian order to form an N-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than N, the stored result will be 0. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftLeftM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint32_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shiftLeft96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftLeftM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftLeft96M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftLeft128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftLeftM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftLeft128M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftLeft160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftLeftM' with +| 'size_words' = 5 (N = 160). 
+*----------------------------------------------------------------------------*/ +#define softfloat_shiftLeft160M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftRightM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must be in the range 1 to 31. Any nonzero bits shifted off are lost. The +| shifted N-bit result is stored at the location pointed to by 'zPtr'. Each +| of 'aPtr' and 'zPtr' points to a 'size_words'-long array of 32-bit elements +| that concatenate in the platform's normal endian order to form an N-bit +| integer. +*----------------------------------------------------------------------------*/ +void + softfloat_shortShiftRightM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shortShiftRight128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftRightM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftRight128M( aPtr, dist, zPtr ) softfloat_shortShiftRightM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftRight160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftRightM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftRight160M( aPtr, dist, zPtr ) softfloat_shortShiftRightM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftRightJamM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must be in the range 1 to 31. If any nonzero bits are shifted off, they are +| "jammed" into the least-significant bit of the shifted value by setting the +| least-significant bit to 1. This shifted-and-jammed N-bit result is stored +| at the location pointed to by 'zPtr'. Each of 'aPtr' and 'zPtr' points +| to a 'size_words'-long array of 32-bit elements that concatenate in the +| platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_shortShiftRightJamM( + uint_fast8_t, const uint32_t *, uint_fast8_t, uint32_t * ); +#endif + +#ifndef softfloat_shortShiftRightJam160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftRightJamM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftRightJam160M( aPtr, dist, zPtr ) softfloat_shortShiftRightJamM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must not be zero. Any nonzero bits shifted off are lost. 
The shifted +| N-bit result is stored at the location pointed to by 'zPtr'. Each of 'aPtr' +| and 'zPtr' points to a 'size_words'-long array of 32-bit elements that +| concatenate in the platform's normal endian order to form an N-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than N, the stored result will be 0. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftRightM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint32_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shiftRight96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRight96M( aPtr, dist, zPtr ) softfloat_shiftRightM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightJamM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must not be zero. If any nonzero bits are shifted off, they are "jammed" +| into the least-significant bit of the shifted value by setting the least- +| significant bit to 1. This shifted-and-jammed N-bit result is stored +| at the location pointed to by 'zPtr'. Each of 'aPtr' and 'zPtr' points +| to a 'size_words'-long array of 32-bit elements that concatenate in the +| platform's normal endian order to form an N-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' +| is greater than N, the stored result will be either 0 or 1, depending on +| whether the original N bits are all zeros. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftRightJamM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint32_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shiftRightJam96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightJamM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRightJam96M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightJam128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightJamM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRightJam128M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightJam160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightJamM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRightJam160M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_addM +/*---------------------------------------------------------------------------- +| Adds the two N-bit integers pointed to by 'aPtr' and 'bPtr', where N = +| 'size_words' * 32. 
The addition is modulo 2^N, so any carry out is lost.
+| The N-bit sum is stored at the location pointed to by 'zPtr'. Each of
+| 'aPtr', 'bPtr', and 'zPtr' points to a 'size_words'-long array of 32-bit
+| elements that concatenate in the platform's normal endian order to form an
+| N-bit integer.
+*----------------------------------------------------------------------------*/
+void
+ softfloat_addM(
+ uint_fast8_t size_words,
+ const uint32_t *aPtr,
+ const uint32_t *bPtr,
+ uint32_t *zPtr
+ );
+#endif
+
+#ifndef softfloat_add96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addM' with 'size_words'
+| = 3 (N = 96).
+*----------------------------------------------------------------------------*/
+#define softfloat_add96M( aPtr, bPtr, zPtr ) softfloat_addM( 3, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_add128M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addM' with 'size_words'
+| = 4 (N = 128).
+*----------------------------------------------------------------------------*/
+#define softfloat_add128M( aPtr, bPtr, zPtr ) softfloat_addM( 4, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_add160M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addM' with 'size_words'
+| = 5 (N = 160).
+*----------------------------------------------------------------------------*/
+#define softfloat_add160M( aPtr, bPtr, zPtr ) softfloat_addM( 5, aPtr, bPtr, zPtr )
+#endif
+
+#ifndef softfloat_addCarryM
+/*----------------------------------------------------------------------------
+| Adds the two N-bit unsigned integers pointed to by 'aPtr' and 'bPtr', where
+| N = 'size_words' * 32, plus 'carry', which must be either 0 or 1. The N-bit
+| sum (modulo 2^N) is stored at the location pointed to by 'zPtr', and any
+| carry out is returned as the result. Each of 'aPtr', 'bPtr', and 'zPtr'
+| points to a 'size_words'-long array of 32-bit elements that concatenate in
+| the platform's normal endian order to form an N-bit integer.
+*----------------------------------------------------------------------------*/
+uint_fast8_t
+ softfloat_addCarryM(
+ uint_fast8_t size_words,
+ const uint32_t *aPtr,
+ const uint32_t *bPtr,
+ uint_fast8_t carry,
+ uint32_t *zPtr
+ );
+#endif
+
+#ifndef softfloat_addComplCarryM
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addCarryM', except that
+| the value of the unsigned integer pointed to by 'bPtr' is bit-wise
+| complemented before the addition.
+*----------------------------------------------------------------------------*/
+uint_fast8_t
+ softfloat_addComplCarryM(
+ uint_fast8_t size_words,
+ const uint32_t *aPtr,
+ const uint32_t *bPtr,
+ uint_fast8_t carry,
+ uint32_t *zPtr
+ );
+#endif
+
+#ifndef softfloat_addComplCarry96M
+/*----------------------------------------------------------------------------
+| This function or macro is the same as 'softfloat_addComplCarryM' with
+| 'size_words' = 3 (N = 96).
+*----------------------------------------------------------------------------*/ +#define softfloat_addComplCarry96M( aPtr, bPtr, carry, zPtr ) softfloat_addComplCarryM( 3, aPtr, bPtr, carry, zPtr ) +#endif + +#ifndef softfloat_negXM +/*---------------------------------------------------------------------------- +| Replaces the N-bit unsigned integer pointed to by 'zPtr' by the +| 2s-complement of itself, where N = 'size_words' * 32. Argument 'zPtr' +| points to a 'size_words'-long array of 32-bit elements that concatenate in +| the platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void softfloat_negXM( uint_fast8_t size_words, uint32_t *zPtr ); +#endif + +#ifndef softfloat_negX96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_negX96M( zPtr ) softfloat_negXM( 3, zPtr ) +#endif + +#ifndef softfloat_negX128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_negX128M( zPtr ) softfloat_negXM( 4, zPtr ) +#endif + +#ifndef softfloat_negX160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_negX160M( zPtr ) softfloat_negXM( 5, zPtr ) +#endif + +#ifndef softfloat_negX256M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 8 (N = 256). +*----------------------------------------------------------------------------*/ +#define softfloat_negX256M( zPtr ) softfloat_negXM( 8, zPtr ) +#endif + +#ifndef softfloat_sub1XM +/*---------------------------------------------------------------------------- +| Subtracts 1 from the N-bit integer pointed to by 'zPtr', where N = +| 'size_words' * 32. The subtraction is modulo 2^N, so any borrow out (carry +| out) is lost. Argument 'zPtr' points to a 'size_words'-long array of 32-bit +| elements that concatenate in the platform's normal endian order to form an +| N-bit integer. +*----------------------------------------------------------------------------*/ +void softfloat_sub1XM( uint_fast8_t size_words, uint32_t *zPtr ); +#endif + +#ifndef softfloat_sub1X96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_sub1XM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_sub1X96M( zPtr ) softfloat_sub1XM( 3, zPtr ) +#endif + +#ifndef softfloat_sub1X160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_sub1XM' with 'size_words' +| = 5 (N = 160). 
+*----------------------------------------------------------------------------*/ +#define softfloat_sub1X160M( zPtr ) softfloat_sub1XM( 5, zPtr ) +#endif + +#ifndef softfloat_subM +/*---------------------------------------------------------------------------- +| Subtracts the two N-bit integers pointed to by 'aPtr' and 'bPtr', where N = +| 'size_words' * 32. The subtraction is modulo 2^N, so any borrow out (carry +| out) is lost. The N-bit difference is stored at the location pointed to by +| 'zPtr'. Each of 'aPtr', 'bPtr', and 'zPtr' points to a 'size_words'-long +| array of 32-bit elements that concatenate in the platform's normal endian +| order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_subM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_sub96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_subM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_sub96M( aPtr, bPtr, zPtr ) softfloat_subM( 3, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_sub128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_subM' with 'size_words' +| = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_sub128M( aPtr, bPtr, zPtr ) softfloat_subM( 4, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_sub160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_subM' with 'size_words' +| = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_sub160M( aPtr, bPtr, zPtr ) softfloat_subM( 5, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_mul64To128M +/*---------------------------------------------------------------------------- +| Multiplies 'a' and 'b' and stores the 128-bit product at the location +| pointed to by 'zPtr'. Argument 'zPtr' points to an array of four 32-bit +| elements that concatenate in the platform's normal endian order to form a +| 128-bit integer. +*----------------------------------------------------------------------------*/ +void softfloat_mul64To128M( uint64_t a, uint64_t b, uint32_t *zPtr ); +#endif + +#ifndef softfloat_mul128MTo256M +/*---------------------------------------------------------------------------- +| Multiplies the two 128-bit unsigned integers pointed to by 'aPtr' and +| 'bPtr', and stores the 256-bit product at the location pointed to by 'zPtr'. +| Each of 'aPtr' and 'bPtr' points to an array of four 32-bit elements that +| concatenate in the platform's normal endian order to form a 128-bit integer. +| Argument 'zPtr' points to an array of eight 32-bit elements that concatenate +| to form a 256-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_mul128MTo256M( + const uint32_t *aPtr, const uint32_t *bPtr, uint32_t *zPtr ); +#endif + +#ifndef softfloat_remStepMBy32 +/*---------------------------------------------------------------------------- +| Performs a "remainder reduction step" as follows: Arguments 'remPtr' and +| 'bPtr' both point to N-bit unsigned integers, where N = 'size_words' * 32. 
+| Defining R and B as the values of those integers, the expression (R<<'dist') +| - B * q is computed modulo 2^N, and the N-bit result is stored at the +| location pointed to by 'zPtr'. Each of 'remPtr', 'bPtr', and 'zPtr' points +| to a 'size_words'-long array of 32-bit elements that concatenate in the +| platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_remStepMBy32( + uint_fast8_t size_words, + const uint32_t *remPtr, + uint_fast8_t dist, + const uint32_t *bPtr, + uint32_t q, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_remStep96MBy32 +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_remStepMBy32' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_remStep96MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 3, remPtr, dist, bPtr, q, zPtr ) +#endif + +#ifndef softfloat_remStep128MBy32 +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_remStepMBy32' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_remStep128MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 4, remPtr, dist, bPtr, q, zPtr ) +#endif + +#ifndef softfloat_remStep160MBy32 +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_remStepMBy32' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_remStep160MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 5, remPtr, dist, bPtr, q, zPtr ) +#endif + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_add128.c b/vendor/riscv-isa-sim/softfloat/s_add128.c new file mode 100644 index 00000000..8065656a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_add128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_add128
+
+struct uint128
+ softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+ struct uint128 z;
+
+ z.v0 = a0 + b0;
+ z.v64 = a64 + b64 + (z.v0 < a0);
+ return z;
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_add256M.c b/vendor/riscv-isa-sim/softfloat/s_add256M.c
new file mode 100644
index 00000000..d07b0046
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_add256M.c
@@ -0,0 +1,65 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
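A note on the carry handling in softfloat_add128 above: the low 64-bit words are added modulo 2^64, and that addition wrapped exactly when the result compares less than one of its addends, which is the (z.v0 < a0) term folded into the high-word sum. A minimal standalone illustration (editor's sketch, not part of the patch; the demo names are made up):

/* demo_add128.c -- editor's sketch of the carry rule in softfloat_add128 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t v64, v0; };            /* high word, low word */

static struct u128 add128( uint64_t a64, uint64_t a0,
                           uint64_t b64, uint64_t b0 )
{
    struct u128 z;
    z.v0  = a0 + b0;                  /* low words, modulo 2^64 */
    z.v64 = a64 + b64 + (z.v0 < a0);  /* wrapped iff sum < an addend */
    return z;
}

int main( void )
{
    /* an all-ones low word plus 1 must carry into the high word */
    struct u128 z = add128( 0, UINT64_MAX, 0, 1 );
    printf( "%" PRIx64 ":%016" PRIx64 "\n", z.v64, z.v0 );  /* 1:0...0 */
    return 0;
}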
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_add256M
+
+void
+ softfloat_add256M(
+ const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr )
+{
+ unsigned int index;
+ uint_fast8_t carry;
+ uint64_t wordA, wordZ;
+
+ index = indexWordLo( 4 );
+ carry = 0;
+ for (;;) {
+ wordA = aPtr[index];
+ wordZ = wordA + bPtr[index] + carry;
+ zPtr[index] = wordZ;
+ if ( index == indexWordHi( 4 ) ) break;
+ if ( wordZ != wordA ) carry = (wordZ < wordA);
+ index += wordIncr;
+ }
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_addCarryM.c b/vendor/riscv-isa-sim/softfloat/s_addCarryM.c
new file mode 100644
index 00000000..fae1db49
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_addCarryM.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
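softfloat_add256M above walks the four 64-bit words from least to most significant through the indexWordLo/indexWordHi/wordIncr macros, which hide the platform's word order. The carry is only recomputed when the addend and carry actually changed the word: when wordZ == wordA, the addend plus carry was congruent to 0 modulo 2^64, and the incoming carry value remains the correct carry out. A little-endian specialisation, where those macros reduce to 0, 3, and +1 (editor's sketch; add256_le is a hypothetical name):

/* editor's sketch: softfloat_add256M specialised to little-endian order */
#include <assert.h>
#include <stdint.h>

static void add256_le( const uint64_t *a, const uint64_t *b, uint64_t *z )
{
    unsigned carry = 0;
    for ( unsigned i = 0; i < 4; ++i ) {   /* word 0 = least significant */
        uint64_t wordA = a[i];
        uint64_t wordZ = wordA + b[i] + carry;
        z[i] = wordZ;
        /* same trick as the vendored loop: if the word is unchanged,
           the incoming carry is already the correct outgoing carry */
        if ( wordZ != wordA ) carry = (wordZ < wordA);
    }
}

int main( void )
{
    uint64_t a[4] = { UINT64_MAX, UINT64_MAX, 0, 0 };  /* 2^128 - 1 */
    uint64_t b[4] = { 1, 0, 0, 0 }, z[4];
    add256_le( a, b, z );                  /* carry ripples two words up */
    assert( z[0] == 0 && z[1] == 0 && z[2] == 1 && z[3] == 0 );
    return 0;
}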
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_addCarryM
+
+uint_fast8_t
+ softfloat_addCarryM(
+ uint_fast8_t size_words,
+ const uint32_t *aPtr,
+ const uint32_t *bPtr,
+ uint_fast8_t carry,
+ uint32_t *zPtr
+ )
+{
+ unsigned int index, lastIndex;
+ uint32_t wordA, wordZ;
+
+ index = indexWordLo( size_words );
+ lastIndex = indexWordHi( size_words );
+ for (;;) {
+ wordA = aPtr[index];
+ wordZ = wordA + bPtr[index] + carry;
+ zPtr[index] = wordZ;
+ if ( wordZ != wordA ) carry = (wordZ < wordA);
+ if ( index == lastIndex ) break;
+ index += wordIncr;
+ }
+ return carry;
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c b/vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c
new file mode 100644
index 00000000..02f2bce4
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
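softfloat_addCarryM above generalises the word loop to any width, threading a carry in and returning the carry out so callers can chain still wider additions. A freestanding restatement of its contract for little-endian word order, with a small self-check (editor's sketch; add_carry_words is a hypothetical stand-in, not the vendored symbol):

/* editor's sketch of the softfloat_addCarryM contract, little-endian */
#include <assert.h>
#include <stdint.h>

static uint8_t add_carry_words( uint8_t n, const uint32_t *a,
                                const uint32_t *b, uint8_t carry,
                                uint32_t *z )
{
    for ( uint8_t i = 0; i < n; ++i ) {
        uint32_t wordA = a[i];
        uint32_t wordZ = wordA + b[i] + carry;
        z[i] = wordZ;
        if ( wordZ != wordA ) carry = (wordZ < wordA);
    }
    return carry;          /* carry out of the most-significant word */
}

int main( void )
{
    /* 0xFFFFFFFF_FFFFFFFF + 1 with carry-in 0: result 0, carry out 1 */
    uint32_t a[2] = { 0xFFFFFFFF, 0xFFFFFFFF }, b[2] = { 1, 0 }, z[2];
    assert( add_carry_words( 2, a, b, 0, z ) == 1 );
    assert( z[0] == 0 && z[1] == 0 );
    return 0;
}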
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_addComplCarryM
+
+uint_fast8_t
+ softfloat_addComplCarryM(
+ uint_fast8_t size_words,
+ const uint32_t *aPtr,
+ const uint32_t *bPtr,
+ uint_fast8_t carry,
+ uint32_t *zPtr
+ )
+{
+ unsigned int index, lastIndex;
+ uint32_t wordA, wordZ;
+
+ index = indexWordLo( size_words );
+ lastIndex = indexWordHi( size_words );
+ for (;;) {
+ wordA = aPtr[index];
+ wordZ = wordA + ~bPtr[index] + carry;
+ zPtr[index] = wordZ;
+ if ( wordZ != wordA ) carry = (wordZ < wordA);
+ if ( index == lastIndex ) break;
+ index += wordIncr;
+ }
+ return carry;
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_addM.c b/vendor/riscv-isa-sim/softfloat/s_addM.c
new file mode 100644
index 00000000..a06eda65
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_addM.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
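softfloat_addComplCarryM above is the usual complement-and-carry construction: since A - B = A + ~B + 1 modulo 2^N for N-bit words, feeding it a carry-in of 1 performs a subtraction, and the returned carry is the familiar not-borrow flag. A two-line check of that identity on a single 32-bit word (editor's sketch):

/* editor's check of the identity behind softfloat_addComplCarryM */
#include <assert.h>
#include <stdint.h>

int main( void )
{
    uint32_t a = 5, b = 7;
    /* A - B == A + ~B + 1 (mod 2^32); both sides are 0xFFFFFFFE here */
    assert( (uint32_t) (a - b) == a + ~b + 1u );
    /* the carry out of A + ~B + 1 is 1 exactly when A >= B (no borrow) */
    assert( (((uint64_t) a + ~b + 1u) >> 32) == (a >= b ? 1u : 0u) );
    return 0;
}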
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_addM
+
+void
+ softfloat_addM(
+ uint_fast8_t size_words,
+ const uint32_t *aPtr,
+ const uint32_t *bPtr,
+ uint32_t *zPtr
+ )
+{
+ unsigned int index, lastIndex;
+ uint_fast8_t carry;
+ uint32_t wordA, wordZ;
+
+ index = indexWordLo( size_words );
+ lastIndex = indexWordHi( size_words );
+ carry = 0;
+ for (;;) {
+ wordA = aPtr[index];
+ wordZ = wordA + bPtr[index] + carry;
+ zPtr[index] = wordZ;
+ if ( index == lastIndex ) break;
+ if ( wordZ != wordA ) carry = (wordZ < wordA);
+ index += wordIncr;
+ }
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF128.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF128.c
new file mode 100644
index 00000000..292f0aa5
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF128.c
@@ -0,0 +1,154 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float128_t
+ softfloat_addMagsF128(
+ uint_fast64_t uiA64,
+ uint_fast64_t uiA0,
+ uint_fast64_t uiB64,
+ uint_fast64_t uiB0,
+ bool signZ
+ )
+{
+ int_fast32_t expA;
+ struct uint128 sigA;
+ int_fast32_t expB;
+ struct uint128 sigB;
+ int_fast32_t expDiff;
+ struct uint128 uiZ, sigZ;
+ int_fast32_t expZ;
+ uint_fast64_t sigZExtra;
+ struct uint128_extra sig128Extra;
+ union ui128_f128 uZ;
+
+ expA = expF128UI64( uiA64 );
+ sigA.v64 = fracF128UI64( uiA64 );
+ sigA.v0 = uiA0;
+ expB = expF128UI64( uiB64 );
+ sigB.v64 = fracF128UI64( uiB64 );
+ sigB.v0 = uiB0;
+ expDiff = expA - expB;
+ if ( ! 
expDiff ) { + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + goto uiZ; + } + sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ); + if ( ! expA ) { + uiZ.v64 = packToF128UI64( signZ, 0, sigZ.v64 ); + uiZ.v0 = sigZ.v0; + goto uiZ; + } + expZ = expA; + sigZ.v64 |= UINT64_C( 0x0002000000000000 ); + sigZExtra = 0; + goto shiftRight1; + } + if ( expDiff < 0 ) { + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + expZ = expB; + if ( expA ) { + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + } else { + ++expDiff; + sigZExtra = 0; + if ( ! expDiff ) goto newlyAligned; + } + sig128Extra = + softfloat_shiftRightJam128Extra( sigA.v64, sigA.v0, 0, -expDiff ); + sigA = sig128Extra.v; + sigZExtra = sig128Extra.extra; + } else { + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) goto propagateNaN; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + goto uiZ; + } + expZ = expA; + if ( expB ) { + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + } else { + --expDiff; + sigZExtra = 0; + if ( ! expDiff ) goto newlyAligned; + } + sig128Extra = + softfloat_shiftRightJam128Extra( sigB.v64, sigB.v0, 0, expDiff ); + sigB = sig128Extra.v; + sigZExtra = sig128Extra.extra; + } + newlyAligned: + sigZ = + softfloat_add128( + sigA.v64 | UINT64_C( 0x0001000000000000 ), + sigA.v0, + sigB.v64, + sigB.v0 + ); + --expZ; + if ( sigZ.v64 < UINT64_C( 0x0002000000000000 ) ) goto roundAndPack; + ++expZ; + shiftRight1: + sig128Extra = + softfloat_shortShiftRightJam128Extra( + sigZ.v64, sigZ.v0, sigZExtra, 1 ); + sigZ = sig128Extra.v; + sigZExtra = sig128Extra.extra; + roundAndPack: + return + softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF16.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF16.c new file mode 100644 index 00000000..4204c1e0 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF16.c @@ -0,0 +1,183 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float16_t softfloat_addMagsF16( uint_fast16_t uiA, uint_fast16_t uiB )
+{
+ int_fast8_t expA;
+ uint_fast16_t sigA;
+ int_fast8_t expB;
+ uint_fast16_t sigB;
+ int_fast8_t expDiff;
+ uint_fast16_t uiZ;
+ bool signZ;
+ int_fast8_t expZ;
+ uint_fast16_t sigZ;
+ uint_fast16_t sigX, sigY;
+ int_fast8_t shiftDist;
+ uint_fast32_t sig32Z;
+ int_fast8_t roundingMode;
+ union ui16_f16 uZ;
+
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ expA = expF16UI( uiA );
+ sigA = fracF16UI( uiA );
+ expB = expF16UI( uiB );
+ sigB = fracF16UI( uiB );
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ expDiff = expA - expB;
+ if ( ! expDiff ) {
+ /*--------------------------------------------------------------------
+ *--------------------------------------------------------------------*/
+ if ( ! expA ) {
+ uiZ = uiA + sigB;
+ goto uiZ;
+ }
+ if ( expA == 0x1F ) {
+ if ( sigA | sigB ) goto propagateNaN;
+ uiZ = uiA;
+ goto uiZ;
+ }
+ signZ = signF16UI( uiA );
+ expZ = expA;
+ sigZ = 0x0800 + sigA + sigB;
+ if ( ! (sigZ & 1) && (expZ < 0x1E) ) {
+ sigZ >>= 1;
+ goto pack;
+ }
+ sigZ <<= 3;
+ } else {
+ /*--------------------------------------------------------------------
+ *--------------------------------------------------------------------*/
+ signZ = signF16UI( uiA );
+ if ( expDiff < 0 ) {
+ /*----------------------------------------------------------------
+ *----------------------------------------------------------------*/
+ if ( expB == 0x1F ) {
+ if ( sigB ) goto propagateNaN;
+ uiZ = packToF16UI( signZ, 0x1F, 0 );
+ goto uiZ;
+ }
+ if ( expDiff <= -13 ) {
+ uiZ = packToF16UI( signZ, expB, sigB );
+ if ( expA | sigA ) goto addEpsilon;
+ goto uiZ;
+ }
+ expZ = expB;
+ sigX = sigB | 0x0400;
+ sigY = sigA + (expA ? 0x0400 : sigA);
+ shiftDist = 19 + expDiff;
+ } else {
+ /*----------------------------------------------------------------
+ *----------------------------------------------------------------*/
+ uiZ = uiA;
+ if ( expA == 0x1F ) {
+ if ( sigA ) goto propagateNaN;
+ goto uiZ;
+ }
+ if ( 13 <= expDiff ) {
+ if ( expB | sigB ) goto addEpsilon;
+ goto uiZ;
+ }
+ expZ = expA;
+ sigX = sigA | 0x0400;
+ sigY = sigB + (expB ? 0x0400 : sigB);
+ shiftDist = 19 - expDiff;
+ }
+ sig32Z =
+ ((uint_fast32_t) sigX<<19) + ((uint_fast32_t) sigY<<shiftDist);
+ if ( sig32Z < 0x40000000 ) {
+ --expZ;
+ sig32Z <<= 1;
+ }
+ sigZ = sig32Z>>16;
+ if ( sig32Z & 0xFFFF ) {
+ sigZ |= 1;
+ } else {
+ if ( ! 
(sigZ & 0xF) && (expZ < 0x1E) ) { + sigZ >>= 4; + goto pack; + } + } + } + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + addEpsilon: + roundingMode = softfloat_roundingMode; + if ( roundingMode != softfloat_round_near_even ) { + if ( + roundingMode + == (signF16UI( uiZ ) ? softfloat_round_min + : softfloat_round_max) + ) { + ++uiZ; + if ( (uint16_t) (uiZ<<1) == 0xF800 ) { + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + } + } +#ifdef SOFTFLOAT_ROUND_ODD + else if ( roundingMode == softfloat_round_odd ) { + uiZ |= 1; + } +#endif + } + softfloat_exceptionFlags |= softfloat_flag_inexact; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + pack: + uiZ = packToF16UI( signZ, expZ, sigZ ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF32.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF32.c new file mode 100644 index 00000000..ba647814 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF32.c @@ -0,0 +1,126 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
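The constants threaded through softfloat_addMagsF16 above (0x1F, 0x0400, 0x0800, packToF16UI) all come from the IEEE binary16 layout: 1 sign bit, 5 exponent bits, 10 fraction bits, with 0x0400 as the implicit leading significand bit. A tiny packing sketch with two known encodings (editor's illustration assuming that layout; pack_f16 is a hypothetical name, not the vendored macro):

/* editor's sketch of the binary16 packing behind packToF16UI */
#include <assert.h>
#include <stdint.h>

static uint16_t pack_f16( int sign, int exp, int frac )
{
    /* sign(1) | exponent(5) | fraction(10) */
    return (uint16_t) ((sign << 15) | (exp << 10) | frac);
}

int main( void )
{
    assert( pack_f16( 0, 0x0F, 0 ) == 0x3C00 );  /* +1.0 (bias 15)  */
    assert( pack_f16( 1, 0x1F, 0 ) == 0xFC00 );  /* -infinity       */
    return 0;
}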
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float32_t softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+ int_fast16_t expA;
+ uint_fast32_t sigA;
+ int_fast16_t expB;
+ uint_fast32_t sigB;
+ int_fast16_t expDiff;
+ uint_fast32_t uiZ;
+ bool signZ;
+ int_fast16_t expZ;
+ uint_fast32_t sigZ;
+ union ui32_f32 uZ;
+
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ expA = expF32UI( uiA );
+ sigA = fracF32UI( uiA );
+ expB = expF32UI( uiB );
+ sigB = fracF32UI( uiB );
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ expDiff = expA - expB;
+ if ( ! expDiff ) {
+ /*--------------------------------------------------------------------
+ *--------------------------------------------------------------------*/
+ if ( ! expA ) {
+ uiZ = uiA + sigB;
+ goto uiZ;
+ }
+ if ( expA == 0xFF ) {
+ if ( sigA | sigB ) goto propagateNaN;
+ uiZ = uiA;
+ goto uiZ;
+ }
+ signZ = signF32UI( uiA );
+ expZ = expA;
+ sigZ = 0x01000000 + sigA + sigB;
+ if ( ! (sigZ & 1) && (expZ < 0xFE) ) {
+ uiZ = packToF32UI( signZ, expZ, sigZ>>1 );
+ goto uiZ;
+ }
+ sigZ <<= 6;
+ } else {
+ /*--------------------------------------------------------------------
+ *--------------------------------------------------------------------*/
+ signZ = signF32UI( uiA );
+ sigA <<= 6;
+ sigB <<= 6;
+ if ( expDiff < 0 ) {
+ if ( expB == 0xFF ) {
+ if ( sigB ) goto propagateNaN;
+ uiZ = packToF32UI( signZ, 0xFF, 0 );
+ goto uiZ;
+ }
+ expZ = expB;
+ sigA += expA ? 0x20000000 : sigA;
+ sigA = softfloat_shiftRightJam32( sigA, -expDiff );
+ } else {
+ if ( expA == 0xFF ) {
+ if ( sigA ) goto propagateNaN;
+ uiZ = uiA;
+ goto uiZ;
+ }
+ expZ = expA;
+ sigB += expB ? 0x20000000 : sigB;
+ sigB = softfloat_shiftRightJam32( sigB, expDiff );
+ }
+ sigZ = 0x20000000 + sigA + sigB;
+ if ( sigZ < 0x40000000 ) {
+ --expZ;
+ sigZ <<= 1;
+ }
+ }
+ return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ propagateNaN:
+ uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF64.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF64.c
new file mode 100644
index 00000000..63e1afe9
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF64.c
@@ -0,0 +1,128 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. 
Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float64_t
+ softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+ int_fast16_t expA;
+ uint_fast64_t sigA;
+ int_fast16_t expB;
+ uint_fast64_t sigB;
+ int_fast16_t expDiff;
+ uint_fast64_t uiZ;
+ int_fast16_t expZ;
+ uint_fast64_t sigZ;
+ union ui64_f64 uZ;
+
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ expA = expF64UI( uiA );
+ sigA = fracF64UI( uiA );
+ expB = expF64UI( uiB );
+ sigB = fracF64UI( uiB );
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ expDiff = expA - expB;
+ if ( ! expDiff ) {
+ /*--------------------------------------------------------------------
+ *--------------------------------------------------------------------*/
+ if ( ! 
expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigZ = UINT64_C( 0x0020000000000000 ) + sigA + sigB; + sigZ <<= 9; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigA <<= 9; + sigB <<= 9; + if ( expDiff < 0 ) { + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + } + expZ = expB; + if ( expA ) { + sigA += UINT64_C( 0x2000000000000000 ); + } else { + sigA <<= 1; + } + sigA = softfloat_shiftRightJam64( sigA, -expDiff ); + } else { + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + if ( expB ) { + sigB += UINT64_C( 0x2000000000000000 ); + } else { + sigB <<= 1; + } + sigB = softfloat_shiftRightJam64( sigB, expDiff ); + } + sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB; + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c b/vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c new file mode 100644 index 00000000..a06192ed --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
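All of the addMags routines above rely on softfloat_shiftRightJam64/32 to align the smaller operand: bits shifted out are not simply discarded but OR'ed ("jammed") into the least-significant bit, so the later rounding step can still distinguish an exact value from an inexact one. A sketch of the 64-bit case for shift distances 1 to 63 (editor's illustration; the vendored version also handles dist >= 64):

/* editor's sketch of the shift-right-and-jam operation, dist in 1..63 */
#include <assert.h>
#include <stdint.h>

static uint64_t shift_right_jam64( uint64_t a, unsigned dist )
{
    /* collapse the lost low bits to 0 or 1 and OR into the result's LSB */
    return (a >> dist) | ((a & ((UINT64_C(1) << dist) - 1)) != 0);
}

int main( void )
{
    assert( shift_right_jam64( 0x20, 4 ) == 0x2 );  /* exact, no jam  */
    assert( shift_right_jam64( 0x21, 4 ) == 0x3 );  /* sticky bit set */
    return 0;
}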
+ +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_approxRecip32_1 + +extern const uint16_t softfloat_approxRecip_1k0s[16]; +extern const uint16_t softfloat_approxRecip_1k1s[16]; + +uint32_t softfloat_approxRecip32_1( uint32_t a ) +{ + int index; + uint16_t eps, r0; + uint32_t sigma0; + uint_fast32_t r; + uint32_t sqrSigma0; + + index = a>>27 & 0xF; + eps = (uint16_t) (a>>11); + r0 = softfloat_approxRecip_1k0s[index] + - ((softfloat_approxRecip_1k1s[index] * (uint_fast32_t) eps)>>20); + sigma0 = ~(uint_fast32_t) ((r0 * (uint_fast64_t) a)>>7); + r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>24); + sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32; + r += ((uint32_t) r * (uint_fast64_t) sqrSigma0)>>48; + return r; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c new file mode 100644 index 00000000..2ab71a25 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c @@ -0,0 +1,73 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
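Per the description in SoftFloat's primitives.h, softfloat_approxRecip32_1 treats a as a fixed-point value A = a/2^31 in [1,2) (bit 31 must be set) and returns a 32-bit pure fraction that approximates 1/A from below, to within roughly 2 ulp per the upstream documentation. A quick harness for that claim (editorial sketch, not part of the patch; assumes it is linked against the built softfloat objects):

#include <stdint.h>
#include <stdio.h>

extern uint32_t softfloat_approxRecip32_1( uint32_t a );

int main( void )
{
    uint64_t worst = 0;
    for ( uint64_t a = 0x80000000; a < 0x100000000; a += 0x10001 ) {
        /* exact floor( (1/A) * 2^32 ) = floor( 2^63 / a ), clamped to 32 bits */
        uint64_t exact = (UINT64_C( 1 )<<63) / a;
        if ( 0xFFFFFFFF < exact ) exact = 0xFFFFFFFF;
        uint64_t err = exact - softfloat_approxRecip32_1( (uint32_t) a );
        if ( worst < err ) worst = err;
    }
    printf( "worst underestimate: %llu ulp\n", (unsigned long long) worst );
    return 0;
}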
+ +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_approxRecipSqrt32_1 + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[]; + +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ) +{ + int index; + uint16_t eps, r0; + uint_fast32_t ESqrR0; + uint32_t sigma0; + uint_fast32_t r; + uint32_t sqrSigma0; + + index = (a>>27 & 0xE) + oddExpA; + eps = (uint16_t) (a>>12); + r0 = softfloat_approxRecipSqrt_1k0s[index] + - ((softfloat_approxRecipSqrt_1k1s[index] * (uint_fast32_t) eps) + >>20); + ESqrR0 = (uint_fast32_t) r0 * r0; + if ( ! oddExpA ) ESqrR0 <<= 1; + sigma0 = ~(uint_fast32_t) (((uint32_t) ESqrR0 * (uint_fast64_t) a)>>23); + r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>25); + sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32; + r += ((uint32_t) ((r>>1) + (r>>3) - ((uint_fast32_t) r0<<14)) + * (uint_fast64_t) sqrSigma0) + >>48; + if ( ! (r & 0x80000000) ) r = 0x80000000; + return r; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c new file mode 100644 index 00000000..a60cf825 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c @@ -0,0 +1,49 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
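The structure of softfloat_approxRecipSqrt32_1 above (a table lookup for a first guess r0, then correction terms built from sigma0, which is essentially 1 - A*r0^2 in fixed point) is a Newton-Raphson-style refinement for 1/sqrt(A), carried out entirely in integer arithmetic. For intuition, the same refinement written in ordinary doubles (editorial sketch, not part of the patch):

#include <math.h>
#include <stdio.h>

/* One Newton-Raphson step for 1/sqrt(A): r' = r + r*sigma/2, sigma = 1 - A*r*r.
   The fixed-point routine above fuses this with a second-order correction. */
static double refineRecipSqrt( double A, double r )
{
    double sigma = 1.0 - A * r * r;
    return r + 0.5 * r * sigma;
}

int main( void )
{
    double A = 1.7;
    double r = 0.75;   /* crude first guess, playing the role of r0 */
    for ( int i = 0; i < 4; ++i ) {
        r = refineRecipSqrt( A, r );
        printf( "iteration %d: r = %.17g (true %.17g)\n",
                i, r, 1.0 / sqrt( A ) );
    }
    return 0;
}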
+ +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +const uint16_t softfloat_approxRecipSqrt_1k0s[16] = { + 0xB4C9, 0xFFAB, 0xAA7D, 0xF11C, 0xA1C5, 0xE4C7, 0x9A43, 0xDA29, + 0x93B5, 0xD0E5, 0x8DED, 0xC8B7, 0x88C6, 0xC16D, 0x8424, 0xBAE1 +}; +const uint16_t softfloat_approxRecipSqrt_1k1s[16] = { + 0xA5A5, 0xEA42, 0x8C21, 0xC62D, 0x788F, 0xAA7F, 0x6928, 0x94B6, + 0x5CC7, 0x8335, 0x52A6, 0x74E2, 0x4A3E, 0x68FE, 0x432B, 0x5EFD +}; + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c b/vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c new file mode 100644 index 00000000..1108fcbe --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c @@ -0,0 +1,49 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +const uint16_t softfloat_approxRecip_1k0s[16] = { + 0xFFC4, 0xF0BE, 0xE363, 0xD76F, 0xCCAD, 0xC2F0, 0xBA16, 0xB201, + 0xAA97, 0xA3C6, 0x9D7A, 0x97A6, 0x923C, 0x8D32, 0x887E, 0x8417 +}; +const uint16_t softfloat_approxRecip_1k1s[16] = { + 0xF0F1, 0xD62C, 0xBFA1, 0xAC77, 0x9C0A, 0x8DDB, 0x8185, 0x76BA, + 0x6D3B, 0x64D4, 0x5D5C, 0x56B1, 0x50B6, 0x4B55, 0x4679, 0x4211 +}; + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c new file mode 100644 index 00000000..9b97f343 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c @@ -0,0 +1,56 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser.
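These k0/k1 pairs drive the piecewise-linear first guess r0 = k0 - (k1*eps)>>20 used above: index selects one of 16 subintervals and eps locates a within it. The shipped constants were presumably produced by a careful one-sided error fit, so the naive midpoint fit below only lands near them; it is an editorial sketch to show where numbers of this shape come from, not the original derivation (note the shipped k0 values sit slightly below 2^16/A to keep the approximation an underestimate).

#include <math.h>
#include <stdio.h>

static const unsigned short shipped_1k0s[16] = {
    0xFFC4, 0xF0BE, 0xE363, 0xD76F, 0xCCAD, 0xC2F0, 0xBA16, 0xB201,
    0xAA97, 0xA3C6, 0x9D7A, 0x97A6, 0x923C, 0x8D32, 0x887E, 0x8417
};
static const unsigned short shipped_1k1s[16] = {
    0xF0F1, 0xD62C, 0xBFA1, 0xAC77, 0x9C0A, 0x8DDB, 0x8185, 0x76BA,
    0x6D3B, 0x64D4, 0x5D5C, 0x56B1, 0x50B6, 0x4B55, 0x4679, 0x4211
};

int main( void )
{
    for ( int i = 0; i < 16; ++i ) {
        double lo  = 1.0 + i / 16.0;   /* A at eps = 0 (A = a/2^31)        */
        double mid = lo + 1.0 / 32.0;  /* interval midpoint                */
        /* crude fit: k0 from the interval start, k1 from the derivative
           of 2^16/A at the midpoint */
        unsigned k0 = (unsigned) lround( 65536.0 / lo );
        unsigned k1 = (unsigned) lround( 65536.0 / (mid * mid) );
        printf( "i=%2d  fit k0=0x%05X k1=0x%04X   shipped k0=0x%04X k1=0x%04X\n",
                i, k0, k1, shipped_1k0s[i], shipped_1k1s[i] );
    }
    return 0;
}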
+ +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include "platform.h" +#include "primitiveTypes.h" + +#define softfloat_commonNaNToF128UI softfloat_commonNaNToF128UI +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN *aPtr ) +{ + struct uint128 uiZ; + + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + return uiZ; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. 
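The riscv-isa-sim port deliberately discards NaN payloads: converting any common NaN to F128 just returns the default NaN taken from defaultNaNF128UI64/defaultNaNF128UI0 in specialize.h, and the matching s_*UIToCommonNaN.c files are empty because no payload needs to be carried. On RISC-V the default is the canonical quiet NaN (sign 0, exponent all ones, most-significant fraction bit set). A small standalone check of the narrower canonical patterns (editorial sketch, not part of the patch; the exact wide patterns live in specialize.h):

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main( void )
{
    /* RISC-V canonical quiet NaNs for f32 and f64 */
    uint32_t canonF32 = 0x7FC00000;
    uint64_t canonF64 = UINT64_C( 0x7FF8000000000000 );
    double d;
    memcpy( &d, &canonF64, sizeof d );
    printf( "f32 default NaN: 0x%08" PRIX32 "\n", canonF32 );
    printf( "f64 default NaN: 0x%016" PRIX64 "  isnan=%d\n",
            canonF64, isnan( d ) );
    return 0;
}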
+*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_compare128M.c b/vendor/riscv-isa-sim/softfloat/s_compare128M.c new file mode 100644 index 00000000..c2819e20 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_compare128M.c @@ -0,0 +1,62 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_compare128M + +int_fast8_t softfloat_compare128M( const uint32_t *aPtr, const uint32_t *bPtr ) +{ + unsigned int index, lastIndex; + uint32_t wordA, wordB; + + index = indexWordHi( 4 ); + lastIndex = indexWordLo( 4 ); + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + if ( wordA != wordB ) return (wordA < wordB) ?
-1 : 1; + if ( index == lastIndex ) break; + index -= wordIncr; + } + return 0; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_compare96M.c b/vendor/riscv-isa-sim/softfloat/s_compare96M.c new file mode 100644 index 00000000..0dc39f5d --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_compare96M.c @@ -0,0 +1,62 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_compare96M + +int_fast8_t softfloat_compare96M( const uint32_t *aPtr, const uint32_t *bPtr ) +{ + unsigned int index, lastIndex; + uint32_t wordA, wordB; + + index = indexWordHi( 3 ); + lastIndex = indexWordLo( 3 ); + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + if ( wordA != wordB ) return (wordA < wordB) ? -1 : 1; + if ( index == lastIndex ) break; + index -= wordIncr; + } + return 0; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c new file mode 100644 index 00000000..950db6c8 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2.
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_countLeadingZeros16 + +#define softfloat_countLeadingZeros16 softfloat_countLeadingZeros16 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ) +{ + uint_fast8_t count; + + count = 8; + if ( 0x100 <= a ) { + count = 0; + a >>= 8; + } + count += softfloat_countLeadingZeros8[a]; + return count; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c new file mode 100644 index 00000000..fbf8ab6a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c @@ -0,0 +1,64 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_countLeadingZeros32 + +#define softfloat_countLeadingZeros32 softfloat_countLeadingZeros32 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ) +{ + uint_fast8_t count; + + count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[a>>24]; + return count; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c new file mode 100644 index 00000000..00457418 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c @@ -0,0 +1,73 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_countLeadingZeros64 + +#define softfloat_countLeadingZeros64 softfloat_countLeadingZeros64 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ) +{ + uint_fast8_t count; + uint32_t a32; + + count = 0; + a32 = a>>32; + if ( !
a32 ) { + count = 32; + a32 = a; + } + /*------------------------------------------------------------------------ + | From here, result is current count + count leading zeros of `a32'. + *------------------------------------------------------------------------*/ + if ( a32 < 0x10000 ) { + count += 16; + a32 <<= 16; + } + if ( a32 < 0x1000000 ) { + count += 8; + a32 <<= 8; + } + count += softfloat_countLeadingZeros8[a32>>24]; + return count; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c new file mode 100644 index 00000000..1158d01c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +const uint_least8_t softfloat_countLeadingZeros8[256] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + diff --git a/vendor/riscv-isa-sim/softfloat/s_eq128.c b/vendor/riscv-isa-sim/softfloat/s_eq128.c new file mode 100644 index 00000000..625ef002 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_eq128.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
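This 256-entry byte table is the base case for every wider count-leading-zeros helper in the package: the 16/32/64-bit variants peel the operand down to its top nonzero byte with a few compares and shifts, then finish with a single lookup. An editorial sketch, not part of the patch, that regenerates the table and cross-checks the 64-bit fold against the compiler builtin (__builtin_clzll is a GCC/Clang extension):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t clz8[256];

/* same folding strategy as softfloat_countLeadingZeros64 above */
static unsigned clz64( uint64_t a )
{
    unsigned count = 0;
    uint32_t a32 = a>>32;
    if ( ! a32 ) { count = 32; a32 = (uint32_t) a; }
    if ( a32 < 0x10000 ) { count += 16; a32 <<= 16; }
    if ( a32 < 0x1000000 ) { count += 8; a32 <<= 8; }
    return count + clz8[a32>>24];
}

int main( void )
{
    /* regenerate the table: clz8[i] = number of leading zeros in 8 bits */
    for ( int i = 0; i < 256; ++i ) {
        int n = 8, v = i;
        while ( v ) { --n; v >>= 1; }
        clz8[i] = (uint8_t) n;
    }
    uint64_t x = 1;
    for ( int i = 0; i < 64; ++i, x = x<<1 | 1 ) {
        assert( clz64( x ) == (unsigned) __builtin_clzll( x ) );
    }
    puts( "table-driven clz64 agrees with __builtin_clzll" );
    return 0;
}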
+ +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_eq128 + +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + + return (a64 == b64) && (a0 == b0); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_le128.c b/vendor/riscv-isa-sim/softfloat/s_le128.c new file mode 100644 index 00000000..7261012f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_le128.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_le128 + +bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + + return (a64 < b64) || ((a64 == b64) && (a0 <= b0)); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_lt128.c b/vendor/riscv-isa-sim/softfloat/s_lt128.c new file mode 100644 index 00000000..0d461c36 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_lt128.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
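softfloat_eq128, softfloat_le128, and the softfloat_lt128 that follows all use the same lexicographic idiom on a (high, low) pair of 64-bit words. An editorial sketch, not part of the patch, that cross-checks the idiom against the compiler's 128-bit integer type (unsigned __int128 is a GCC/Clang extension on 64-bit targets):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* same idiom as softfloat_lt128: compare high words, tie-break on low */
static bool lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
{
    return (a64 < b64) || ((a64 == b64) && (a0 < b0));
}

int main( void )
{
    uint64_t samples[] = { 0, 1, 0x8000000000000000, UINT64_MAX };
    for ( int i = 0; i < 4; ++i )
    for ( int j = 0; j < 4; ++j )
    for ( int k = 0; k < 4; ++k )
    for ( int l = 0; l < 4; ++l ) {
        unsigned __int128 a = (unsigned __int128) samples[i]<<64 | samples[j];
        unsigned __int128 b = (unsigned __int128) samples[k]<<64 | samples[l];
        assert( lt128( samples[i], samples[j], samples[k], samples[l] )
                    == (a < b) );
    }
    puts( "lt128 agrees with unsigned __int128" );
    return 0;
}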
+ +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_lt128 + +bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + + return (a64 < b64) || ((a64 == b64) && (a0 < b0)); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul128By32.c b/vendor/riscv-isa-sim/softfloat/s_mul128By32.c new file mode 100644 index 00000000..6e71dd0c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul128By32.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul128By32 + +struct uint128 softfloat_mul128By32( uint64_t a64, uint64_t a0, uint32_t b ) +{ + struct uint128 z; + uint_fast64_t mid; + uint_fast32_t carry; + + z.v0 = a0 * b; + mid = (uint_fast64_t) (uint32_t) (a0>>32) * b; + carry = (uint32_t) ((uint_fast32_t) (z.v0>>32) - (uint_fast32_t) mid); + z.v64 = a64 * b + (uint_fast32_t) ((mid + carry)>>32); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c b/vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c new file mode 100644 index 00000000..49a1d294 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved.
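The carry recovery in softfloat_mul128By32 is worth spelling out: mid holds the high-half partial product, and the subtraction (z.v0>>32) - mid (mod 2^32) reconstructs exactly the carry out of bit 32 of the low partial product, so the high word a64*b can be adjusted without any 128-bit intermediate. An editorial sketch, not part of the patch, restating the algorithm and checking it against unsigned __int128 (a GCC/Clang extension; __int128 arithmetic is exact mod 2^128, which is just the truncation we want):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same algorithm as softfloat_mul128By32 above, restated standalone */
static void mul128By32( uint64_t a64, uint64_t a0, uint32_t b,
                        uint64_t *z64, uint64_t *z0 )
{
    uint64_t mid;
    uint32_t carry;

    *z0 = a0 * b;                                    /* low 64 bits       */
    mid = (uint64_t) (uint32_t) (a0>>32) * b;        /* high partial      */
    carry = (uint32_t) ((uint32_t) (*z0>>32) - (uint32_t) mid);
    *z64 = a64 * b + (uint32_t) ((mid + carry)>>32); /* carry into high   */
}

int main( void )
{
    uint64_t a64 = UINT64_C( 0x0123456789ABCDEF ), a0 = UINT64_MAX;
    uint32_t b = 0xDEADBEEF;
    uint64_t z64, z0;
    mul128By32( a64, a0, b, &z64, &z0 );
    unsigned __int128 ref = ((unsigned __int128) a64<<64 | a0) * b;
    assert( z0 == (uint64_t) ref && z64 == (uint64_t) (ref>>64) );
    puts( "matches __int128 reference" );
    return 0;
}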
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul128MTo256M + +void + softfloat_mul128MTo256M( + const uint32_t *aPtr, const uint32_t *bPtr, uint32_t *zPtr ) +{ + uint32_t *lastZPtr, wordB; + uint64_t dwordProd; + uint32_t wordZ; + uint_fast8_t carry; + + bPtr += indexWordLo( 4 ); + lastZPtr = zPtr + indexMultiwordHi( 8, 5 ); + zPtr += indexMultiwordLo( 8, 5 ); + wordB = *bPtr; + dwordProd = (uint64_t) aPtr[indexWord( 4, 0 )] * wordB; + zPtr[indexWord( 5, 0 )] = dwordProd; + dwordProd = (uint64_t) aPtr[indexWord( 4, 1 )] * wordB + (dwordProd>>32); + zPtr[indexWord( 5, 1 )] = dwordProd; + dwordProd = (uint64_t) aPtr[indexWord( 4, 2 )] * wordB + (dwordProd>>32); + zPtr[indexWord( 5, 2 )] = dwordProd; + dwordProd = (uint64_t) aPtr[indexWord( 4, 3 )] * wordB + (dwordProd>>32); + zPtr[indexWord( 5, 3 )] = dwordProd; + zPtr[indexWord( 5, 4 )] = dwordProd>>32; + do { + bPtr += wordIncr; + zPtr += wordIncr; + wordB = *bPtr; + dwordProd = (uint64_t) aPtr[indexWord( 4, 0 )] * wordB; + wordZ = zPtr[indexWord( 5, 0 )] + (uint32_t) dwordProd; + zPtr[indexWord( 5, 0 )] = wordZ; + carry = (wordZ < (uint32_t) dwordProd); + dwordProd = + (uint64_t) aPtr[indexWord( 4, 1 )] * wordB + (dwordProd>>32); + wordZ = zPtr[indexWord( 5, 1 )] + (uint32_t) dwordProd + carry; + zPtr[indexWord( 5, 1 )] = wordZ; + if ( wordZ != (uint32_t) dwordProd ) { + carry = (wordZ < (uint32_t) dwordProd); + } + dwordProd = + (uint64_t) aPtr[indexWord( 4, 2 )] * wordB + (dwordProd>>32); + wordZ = zPtr[indexWord( 5, 2 )] + (uint32_t) dwordProd + carry; + zPtr[indexWord( 5, 2 )] = wordZ; + if ( wordZ != (uint32_t) dwordProd ) { + carry = (wordZ < (uint32_t) dwordProd); + } + dwordProd = + (uint64_t) aPtr[indexWord( 4, 3 )] * wordB + (dwordProd>>32); + wordZ = zPtr[indexWord( 5, 3 )] + (uint32_t) dwordProd + carry; + zPtr[indexWord( 5, 3 )] = wordZ; + if ( wordZ != (uint32_t) dwordProd ) { + carry = (wordZ < (uint32_t) dwordProd); + } + zPtr[indexWord( 5, 4 )] = (dwordProd>>32) + carry; + } while ( zPtr
!= lastZPtr ); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul128To256M.c b/vendor/riscv-isa-sim/softfloat/s_mul128To256M.c new file mode 100644 index 00000000..fccc2a69 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul128To256M.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" + +#ifndef softfloat_mul128To256M + +#define softfloat_mul128To256M softfloat_mul128To256M +#include "primitives.h" + +void + softfloat_mul128To256M( + uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0, uint64_t *zPtr ) +{ + struct uint128 p0, p64, p128; + uint_fast64_t z64, z128, z192; + + p0 = softfloat_mul64To128( a0, b0 ); + zPtr[indexWord( 4, 0 )] = p0.v0; + p64 = softfloat_mul64To128( a64, b0 ); + z64 = p64.v0 + p0.v64; + z128 = p64.v64 + (z64 < p64.v0); + p128 = softfloat_mul64To128( a64, b64 ); + z128 += p128.v0; + z192 = p128.v64 + (z128 < p128.v0); + p64 = softfloat_mul64To128( a0, b64 ); + z64 += p64.v0; + zPtr[indexWord( 4, 1 )] = z64; + p64.v64 += (z64 < p64.v0); + z128 += p64.v64; + zPtr[indexWord( 4, 2 )] = z128; + zPtr[indexWord( 4, 3 )] = z192 + (z128 < p64.v64); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c b/vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c new file mode 100644 index 00000000..f7e7104e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c @@ -0,0 +1,56 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved.
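A note on addressing: softfloat_mul128MTo256M and the other *M routines never index their word arrays directly; they go through indexWord, indexWordHi, indexWordLo, and wordIncr so the same code works whichever way the platform orders the 32-bit words of a big integer. An editorial sketch, not part of the patch, using what I believe mirrors the little-endian branch of primitives.h (the big-endian branch reverses the index direction):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* little-endian flavor of the word-order macros (hedged: see primitives.h) */
#define wordIncr 1
#define indexWord( total, n ) (n)
#define indexWordHi( total ) ((total) - 1)
#define indexWordLo( total ) 0

int main( void )
{
    uint32_t z[4];
    uint64_t v = UINT64_C( 0x1122334455667788 );

    /* store a 128-bit value: least-significant word at indexWordLo */
    z[indexWord( 4, 0 )] = (uint32_t) v;
    z[indexWord( 4, 1 )] = (uint32_t) (v>>32);
    z[indexWord( 4, 2 )] = 0x99AABBCC;
    z[indexWordHi( 4 )]  = 0xDDEEFF00;

    /* walk from the most-significant word down, as compare128M does */
    for ( int i = indexWordHi( 4 ); ; i -= wordIncr ) {
        printf( "%08" PRIX32 "%s", z[i],
                i == indexWordLo( 4 ) ? "\n" : "_" );
        if ( i == indexWordLo( 4 ) ) break;
    }
    return 0;
}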
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul64ByShifted32To128 + +struct uint128 softfloat_mul64ByShifted32To128( uint64_t a, uint32_t b ) +{ + uint_fast64_t mid; + struct uint128 z; + + mid = (uint_fast64_t) (uint32_t) a * b; + z.v0 = mid<<32; + z.v64 = (uint_fast64_t) (uint32_t) (a>>32) * b + (mid>>32); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul64To128.c b/vendor/riscv-isa-sim/softfloat/s_mul64To128.c new file mode 100644 index 00000000..6620a20b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul64To128.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul64To128 + +struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ) +{ + uint32_t a32, a0, b32, b0; + struct uint128 z; + uint64_t mid1, mid; + + a32 = a>>32; + a0 = a; + b32 = b>>32; + b0 = b; + z.v0 = (uint_fast64_t) a0 * b0; + mid1 = (uint_fast64_t) a32 * b0; + mid = mid1 + (uint_fast64_t) a0 * b32; + z.v64 = (uint_fast64_t) a32 * b32; + z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32; + mid <<= 32; + z.v0 += mid; + z.v64 += (z.v0 < mid); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul64To128M.c b/vendor/riscv-isa-sim/softfloat/s_mul64To128M.c new file mode 100644 index 00000000..e3f9a481 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul64To128M.c @@ -0,0 +1,68 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
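softfloat_mul64To128 is the classic schoolbook 64x64->128 multiply: four 32x32->64 partial products, where the one addition that can wrap (mid) is detected by the comparison mid < mid1 and folded back in as 2^32. It is also the building block softfloat_mul128To256M stacks four times. An editorial sketch, not part of the patch, restating it and checking against unsigned __int128 (GCC/Clang extension):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* schoolbook 64x64->128, same algorithm as softfloat_mul64To128 */
static void mul64To128( uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo )
{
    uint32_t a32 = a>>32, a0 = (uint32_t) a, b32 = b>>32, b0 = (uint32_t) b;
    uint64_t mid1, mid;

    *lo = (uint64_t) a0 * b0;
    mid1 = (uint64_t) a32 * b0;
    mid = mid1 + (uint64_t) a0 * b32;              /* may wrap mod 2^64   */
    *hi = (uint64_t) a32 * b32;
    *hi += (uint64_t) (mid < mid1)<<32 | mid>>32;  /* a wrap adds 2^32    */
    mid <<= 32;
    *lo += mid;
    *hi += (*lo < mid);                            /* carry out of low 64 */
}

int main( void )
{
    uint64_t a = UINT64_C( 0xFEDCBA9876543210 ), b = UINT64_MAX, hi, lo;
    mul64To128( a, b, &hi, &lo );
    unsigned __int128 ref = (unsigned __int128) a * b;
    assert( lo == (uint64_t) ref && hi == (uint64_t) (ref>>64) );
    puts( "matches __int128 reference" );
    return 0;
}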
+ +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul64To128M + +void softfloat_mul64To128M( uint64_t a, uint64_t b, uint32_t *zPtr ) +{ + uint32_t a32, a0, b32, b0; + uint64_t z0, mid1, z64, mid; + + a32 = a>>32; + a0 = a; + b32 = b>>32; + b0 = b; + z0 = (uint64_t) a0 * b0; + mid1 = (uint64_t) a32 * b0; + mid = mid1 + (uint64_t) a0 * b32; + z64 = (uint64_t) a32 * b32; + z64 += (uint64_t) (mid < mid1)<<32 | mid>>32; + mid <<= 32; + z0 += mid; + zPtr[indexWord( 4, 1 )] = z0>>32; + zPtr[indexWord( 4, 0 )] = z0; + z64 += (z0 < mid); + zPtr[indexWord( 4, 3 )] = z64>>32; + zPtr[indexWord( 4, 2 )] = z64; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mulAddF128.c b/vendor/riscv-isa-sim/softfloat/s_mulAddF128.c new file mode 100644 index 00000000..877b33d2 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mulAddF128.c @@ -0,0 +1,350 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t + softfloat_mulAddF128( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0, + uint_fast64_t uiC64, + uint_fast64_t uiC0, + uint_fast8_t op + ) +{ + bool signA; + int_fast32_t expA; + struct uint128 sigA; + bool signB; + int_fast32_t expB; + struct uint128 sigB; + bool signC; + int_fast32_t expC; + struct uint128 sigC; + bool signZ; + uint_fast64_t magBits; + struct uint128 uiZ; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + uint64_t sig256Z[4]; + struct uint128 sigZ; + int_fast32_t shiftDist, expDiff; + struct uint128 x128; + uint64_t sig256C[4]; + static uint64_t zero256[4] = INIT_UINTM4( 0, 0, 0, 0 ); + uint_fast64_t sigZExtra, sig256Z0; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + signB = signF128UI64( uiB64 ); + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + signC = signF128UI64( uiC64 ) ^ (op == softfloat_mulAdd_subC); + expC = expF128UI64( uiC64 ); + sigC.v64 = fracF128UI64( uiC64 ); + sigC.v0 = uiC0; + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( + (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) + ) { + goto propagateNaN_ABC; + } + magBits = expB | sigB.v64 | sigB.v0; + goto infProdArg; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN_ABC; + magBits = expA | sigA.v64 | sigA.v0; + goto infProdArg; + } + if ( expC == 0x7FFF ) { + if ( sigC.v64 | sigC.v0 ) { + uiZ.v64 = 0; + uiZ.v0 = 0; + goto propagateNaN_ZC; + } + uiZ.v64 = uiC64; + uiZ.v0 = uiC0; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) goto zeroProd; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! (sigB.v64 | sigB.v0) ) goto zeroProd; + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FFE; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + sigA = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 8 ); + sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 15 ); + softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z ); + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + shiftDist = 0; + if ( ! (sigZ.v64 & UINT64_C( 0x0100000000000000 )) ) { + --expZ; + shiftDist = -1; + } + if ( ! expC ) { + if ( ! 
(sigC.v64 | sigC.v0) ) { + shiftDist += 8; + goto sigZ; + } + normExpSig = softfloat_normSubnormalF128Sig( sigC.v64, sigC.v0 ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC.v64 |= UINT64_C( 0x0001000000000000 ); + sigC = softfloat_shortShiftLeft128( sigC.v64, sigC.v0, 8 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + shiftDist -= expDiff; + if ( shiftDist ) { + sigZ = + softfloat_shiftRightJam128( sigZ.v64, sigZ.v0, shiftDist ); + } + } else { + if ( ! shiftDist ) { + x128 = + softfloat_shortShiftRight128( + sig256Z[indexWord( 4, 1 )], sig256Z[indexWord( 4, 0 )], + 1 + ); + sig256Z[indexWord( 4, 1 )] = (sigZ.v0<<63) | x128.v64; + sig256Z[indexWord( 4, 0 )] = x128.v0; + sigZ = softfloat_shortShiftRight128( sigZ.v64, sigZ.v0, 1 ); + sig256Z[indexWord( 4, 3 )] = sigZ.v64; + sig256Z[indexWord( 4, 2 )] = sigZ.v0; + } + } + } else { + if ( shiftDist ) softfloat_add256M( sig256Z, sig256Z, sig256Z ); + if ( ! expDiff ) { + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + } else { + sig256C[indexWord( 4, 3 )] = sigC.v64; + sig256C[indexWord( 4, 2 )] = sigC.v0; + sig256C[indexWord( 4, 1 )] = 0; + sig256C[indexWord( 4, 0 )] = 0; + softfloat_shiftRightJam256M( sig256C, expDiff, sig256C ); + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 8; + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ = softfloat_add128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 ); + } else { + softfloat_add256M( sig256Z, sig256C, sig256Z ); + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + } + if ( sigZ.v64 & UINT64_C( 0x0200000000000000 ) ) { + ++expZ; + shiftDist = 9; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + if ( expDiff < -1 ) { + sigZ = + softfloat_sub128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 ); + sigZExtra = + sig256Z[indexWord( 4, 1 )] | sig256Z[indexWord( 4, 0 )]; + if ( sigZExtra ) { + sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, 0, 1 ); + } + if ( ! (sigZ.v64 & UINT64_C( 0x0100000000000000 )) ) { + --expZ; + shiftDist = 7; + } + goto shiftRightRoundPack; + } else { + sig256C[indexWord( 4, 3 )] = sigC.v64; + sig256C[indexWord( 4, 2 )] = sigC.v0; + sig256C[indexWord( 4, 1 )] = 0; + sig256C[indexWord( 4, 0 )] = 0; + softfloat_sub256M( sig256C, sig256Z, sig256Z ); + } + } else if ( ! expDiff ) { + sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, sigC.v64, sigC.v0 ); + if ( + ! (sigZ.v64 | sigZ.v0) && ! sig256Z[indexWord( 4, 1 )] + && ! sig256Z[indexWord( 4, 0 )] + ) { + goto completeCancellation; + } + sig256Z[indexWord( 4, 3 )] = sigZ.v64; + sig256Z[indexWord( 4, 2 )] = sigZ.v0; + if ( sigZ.v64 & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + softfloat_sub256M( zero256, sig256Z, sig256Z ); + } + } else { + softfloat_sub256M( sig256Z, sig256C, sig256Z ); + if ( 1 < expDiff ) { + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + if ( ! 
(sigZ.v64 & UINT64_C( 0x0100000000000000 )) ) { + --expZ; + shiftDist = 7; + } + goto sigZ; + } + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + sigZExtra = sig256Z[indexWord( 4, 1 )]; + sig256Z0 = sig256Z[indexWord( 4, 0 )]; + if ( sigZ.v64 ) { + if ( sig256Z0 ) sigZExtra |= 1; + } else { + expZ -= 64; + sigZ.v64 = sigZ.v0; + sigZ.v0 = sigZExtra; + sigZExtra = sig256Z0; + if ( ! sigZ.v64 ) { + expZ -= 64; + sigZ.v64 = sigZ.v0; + sigZ.v0 = sigZExtra; + sigZExtra = 0; + if ( ! sigZ.v64 ) { + expZ -= 64; + sigZ.v64 = sigZ.v0; + sigZ.v0 = 0; + } + } + } + shiftDist = softfloat_countLeadingZeros64( sigZ.v64 ); + expZ += 7 - shiftDist; + shiftDist = 15 - shiftDist; + if ( 0 < shiftDist ) goto shiftRightRoundPack; + if ( shiftDist ) { + shiftDist = -shiftDist; + sigZ = softfloat_shortShiftLeft128( sigZ.v64, sigZ.v0, shiftDist ); + x128 = softfloat_shortShiftLeft128( 0, sigZExtra, shiftDist ); + sigZ.v0 |= x128.v64; + sigZExtra = x128.v0; + } + goto roundPack; + } + sigZ: + sigZExtra = sig256Z[indexWord( 4, 1 )] | sig256Z[indexWord( 4, 0 )]; + shiftRightRoundPack: + sigZExtra = (uint64_t) (sigZ.v0<<(64 - shiftDist)) | (sigZExtra != 0); + sigZ = softfloat_shortShiftRight128( sigZ.v64, sigZ.v0, shiftDist ); + roundPack: + return + softfloat_roundPackToF128( + signZ, expZ - 1, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + uiZ.v0 = 0; + if ( expC != 0x7FFF ) goto uiZ; + if ( sigC.v64 | sigC.v0 ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF128UI( uiZ.v64, uiZ.v0, uiC64, uiC0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ.v64 = uiC64; + uiZ.v0 = uiC0; + if ( ! (expC | sigC.v64 | sigC.v0) && (signZ != signC) ) { + completeCancellation: + uiZ.v64 = + packToF128UI64( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + uiZ.v0 = 0; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_mulAddF16.c b/vendor/riscv-isa-sim/softfloat/s_mulAddF16.c new file mode 100644 index 00000000..b6040072 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mulAddF16.c @@ -0,0 +1,226 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t + softfloat_mulAddF16( + uint_fast16_t uiA, uint_fast16_t uiB, uint_fast16_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + bool signB; + int_fast8_t expB; + uint_fast16_t sigB; + bool signC; + int_fast8_t expC; + uint_fast16_t sigC; + bool signProd; + uint_fast16_t magBits, uiZ; + struct exp8_sig16 normExpSig; + int_fast8_t expProd; + uint_fast32_t sigProd; + bool signZ; + int_fast8_t expZ; + uint_fast16_t sigZ; + int_fast8_t expDiff; + uint_fast32_t sig32Z, sig32C; + int_fast8_t shiftDist; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + signB = signF16UI( uiB ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + signC = signF16UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF16UI( uiC ); + sigC = fracF16UI( uiC ); + signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA || ((expB == 0x1F) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x1F ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! 
sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expProd = expA + expB - 0xE; + sigA = (sigA | 0x0400)<<4; + sigB = (sigB | 0x0400)<<4; + sigProd = (uint_fast32_t) sigA * sigB; + if ( sigProd < 0x20000000 ) { + --expProd; + sigProd <<= 1; + } + signZ = signProd; + if ( ! expC ) { + if ( ! sigC ) { + expZ = expProd - 1; + sigZ = sigProd>>15 | ((sigProd & 0x7FFF) != 0); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF16Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | 0x0400)<<3; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expProd - expC; + if ( signProd == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + expZ = expC; + sigZ = sigC + softfloat_shiftRightJam32( sigProd, 16 - expDiff ); + } else { + expZ = expProd; + sig32Z = + sigProd + + softfloat_shiftRightJam32( + (uint_fast32_t) sigC<<16, expDiff ); + sigZ = sig32Z>>16 | ((sig32Z & 0xFFFF) != 0 ); + } + if ( sigZ < 0x4000 ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig32C = (uint_fast32_t) sigC<<16; + if ( expDiff < 0 ) { + signZ = signC; + expZ = expC; + sig32Z = sig32C - softfloat_shiftRightJam32( sigProd, -expDiff ); + } else if ( ! expDiff ) { + expZ = expProd; + sig32Z = sigProd - sig32C; + if ( ! sig32Z ) goto completeCancellation; + if ( sig32Z & 0x80000000 ) { + signZ = ! 
signZ; + sig32Z = -sig32Z; + } + } else { + expZ = expProd; + sig32Z = sigProd - softfloat_shiftRightJam32( sig32C, expDiff ); + } + shiftDist = softfloat_countLeadingZeros32( sig32Z ) - 1; + expZ -= shiftDist; + shiftDist -= 16; + if ( shiftDist < 0 ) { + sigZ = + sig32Z>>(-shiftDist) + | ((uint32_t) (sig32Z<<(shiftDist & 31)) != 0); + } else { + sigZ = (uint_fast16_t) sig32Z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t + softfloat_mulAddF32( + uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signC; + int_fast16_t expC; + uint_fast32_t sigC; + bool signProd; + uint_fast32_t magBits, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expProd; + uint_fast64_t sigProd; + bool signZ; + int_fast16_t expZ; + uint_fast32_t sigZ; + int_fast16_t expDiff; + uint_fast64_t sig64Z, sig64C; + int_fast8_t shiftDist; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signC = signF32UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF32UI( uiC ); + sigC = fracF32UI( uiC ); + signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0xFF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expProd = expA + expB - 0x7E; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<7; + sigProd = (uint_fast64_t) sigA * sigB; + if ( sigProd < UINT64_C( 0x2000000000000000 ) ) { + --expProd; + sigProd <<= 1; + } + signZ = signProd; + if ( ! expC ) { + if ( ! 
sigC ) { + expZ = expProd - 1; + sigZ = softfloat_shortShiftRightJam64( sigProd, 31 ); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF32Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | 0x00800000)<<6; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expProd - expC; + if ( signProd == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + expZ = expC; + sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff ); + } else { + expZ = expProd; + sig64Z = + sigProd + + softfloat_shiftRightJam64( + (uint_fast64_t) sigC<<32, expDiff ); + sigZ = softfloat_shortShiftRightJam64( sig64Z, 32 ); + } + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64C = (uint_fast64_t) sigC<<32; + if ( expDiff < 0 ) { + signZ = signC; + expZ = expC; + sig64Z = sig64C - softfloat_shiftRightJam64( sigProd, -expDiff ); + } else if ( ! expDiff ) { + expZ = expProd; + sig64Z = sigProd - sig64C; + if ( ! sig64Z ) goto completeCancellation; + if ( sig64Z & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig64Z = -sig64Z; + } + } else { + expZ = expProd; + sig64Z = sigProd - softfloat_shiftRightJam64( sig64C, expDiff ); + } + shiftDist = softfloat_countLeadingZeros64( sig64Z ) - 1; + expZ -= shiftDist; + shiftDist -= 32; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig64Z, -shiftDist ); + } else { + sigZ = (uint_fast32_t) sig64Z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#ifdef SOFTFLOAT_FAST_INT64 + +float64_t + softfloat_mulAddF64( + uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signC; + int_fast16_t expC; + uint_fast64_t sigC; + bool signZ; + uint_fast64_t magBits, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + struct uint128 sig128Z; + uint_fast64_t sigZ; + int_fast16_t expDiff; + struct uint128 sig128C; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF64UI( uiC ); + sigC = fracF64UI( uiC ); + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x7FF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + 
/*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FE; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<10; + sig128Z = softfloat_mul64To128( sigA, sigB ); + if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) { + --expZ; + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 ); + } + if ( ! expC ) { + if ( ! sigC ) { + --expZ; + sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF64Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<9; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff ); + } else { + sig128Z = + softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 ); + } + } else if ( expDiff ) { + sig128C = softfloat_shiftRightJam128( sigC, 0, expDiff ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0); + } else { + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); + } + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 ); + } else if ( ! expDiff ) { + sig128Z.v64 = sig128Z.v64 - sigC; + if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation; + if ( sig128Z.v64 & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig128Z = softfloat_sub128( 0, 0, sig128Z.v64, sig128Z.v0 ); + } + } else { + sig128Z = + softfloat_sub128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! 
sig128Z.v64 ) { + expZ -= 64; + sig128Z.v64 = sig128Z.v0; + sig128Z.v0 = 0; + } + shiftDist = softfloat_countLeadingZeros64( sig128Z.v64 ) - 1; + expZ -= shiftDist; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig128Z.v64, -shiftDist ); + } else { + sig128Z = + softfloat_shortShiftLeft128( + sig128Z.v64, sig128Z.v0, shiftDist ); + sigZ = sig128Z.v64; + } + sigZ |= (sig128Z.v0 != 0); + } + roundPack: + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + if ( expC != 0x7FF ) goto uiZ; + if ( sigC ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF64UI( uiZ, uiC ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ = uiC; + if ( ! (expC | sigC) && (signZ != signC) ) { + completeCancellation: + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + +#else + +float64_t + softfloat_mulAddF64( + uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint64_t sigA; + bool signB; + int_fast16_t expB; + uint64_t sigB; + bool signC; + int_fast16_t expC; + uint64_t sigC; + bool signZ; + uint64_t magBits, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig128Z[4]; + uint64_t sigZ; + int_fast16_t shiftDist, expDiff; + uint32_t sig128C[4]; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF64UI( uiC ); + sigC = fracF64UI( uiC ); + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x7FF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! 
sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FE; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11; + softfloat_mul64To128M( sigA, sigB, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 | sig128Z[indexWord( 4, 2 )]; + shiftDist = 0; + if ( ! (sigZ & UINT64_C( 0x4000000000000000 )) ) { + --expZ; + shiftDist = -1; + } + if ( ! expC ) { + if ( ! sigC ) { + if ( shiftDist ) sigZ <<= 1; + goto sigZ; + } + normExpSig = softfloat_normSubnormalF64Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<10; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + shiftDist -= expDiff; + if ( shiftDist) { + sigZ = softfloat_shiftRightJam64( sigZ, shiftDist ); + } + } else { + if ( ! shiftDist ) { + softfloat_shortShiftRight128M( sig128Z, 1, sig128Z ); + } + } + } else { + if ( shiftDist ) softfloat_add128M( sig128Z, sig128Z, sig128Z ); + if ( ! expDiff ) { + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + } else { + sig128C[indexWord( 4, 3 )] = sigC>>32; + sig128C[indexWord( 4, 2 )] = sigC; + sig128C[indexWord( 4, 1 )] = 0; + sig128C[indexWord( 4, 0 )] = 0; + softfloat_shiftRightJam128M( sig128C, expDiff, sig128C ); + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ += sigC; + } else { + softfloat_add128M( sig128Z, sig128C, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + } + if ( sigZ & UINT64_C( 0x8000000000000000 ) ) { + ++expZ; + sigZ = softfloat_shortShiftRightJam64( sigZ, 1 ); + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + if ( expDiff < -1 ) { + sigZ = sigC - sigZ; + if ( + sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] + ) { + sigZ = (sigZ - 1) | 1; + } + if ( ! (sigZ & UINT64_C( 0x4000000000000000 )) ) { + --expZ; + sigZ <<= 1; + } + goto roundPack; + } else { + sig128C[indexWord( 4, 3 )] = sigC>>32; + sig128C[indexWord( 4, 2 )] = sigC; + sig128C[indexWord( 4, 1 )] = 0; + sig128C[indexWord( 4, 0 )] = 0; + softfloat_sub128M( sig128C, sig128Z, sig128Z ); + } + } else if ( ! expDiff ) { + sigZ -= sigC; + if ( + ! sigZ && ! sig128Z[indexWord( 4, 1 )] + && ! sig128Z[indexWord( 4, 0 )] + ) { + goto completeCancellation; + } + sig128Z[indexWord( 4, 3 )] = sigZ>>32; + sig128Z[indexWord( 4, 2 )] = sigZ; + if ( sigZ & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! 
signZ; + softfloat_negX128M( sig128Z ); + } + } else { + softfloat_sub128M( sig128Z, sig128C, sig128Z ); + if ( 1 < expDiff ) { + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + if ( ! (sigZ & UINT64_C( 0x4000000000000000 )) ) { + --expZ; + sigZ <<= 1; + } + goto sigZ; + } + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + shiftDist = 0; + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + if ( ! sigZ ) { + shiftDist = 64; + sigZ = + (uint64_t) sig128Z[indexWord( 4, 1 )]<<32 + | sig128Z[indexWord( 4, 0 )]; + } + shiftDist += softfloat_countLeadingZeros64( sigZ ) - 1; + if ( shiftDist ) { + expZ -= shiftDist; + softfloat_shiftLeft128M( sig128Z, shiftDist, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + } + } + sigZ: + if ( sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] ) sigZ |= 1; + roundPack: + return softfloat_roundPackToF64( signZ, expZ - 1, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + if ( expC != 0x7FF ) goto uiZ; + if ( sigC ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF64UI( uiZ, uiC ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ = uiC; + if ( ! (expC | sigC) && (signZ != signC) ) { + completeCancellation: + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_negXM.c b/vendor/riscv-isa-sim/softfloat/s_negXM.c new file mode 100644 index 00000000..76f110c1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_negXM.c @@ -0,0 +1,63 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_negXM
+
+void softfloat_negXM( uint_fast8_t size_words, uint32_t *zPtr )
+{
+    unsigned int index, lastIndex;
+    uint_fast8_t carry;
+    uint32_t word;
+
+    index = indexWordLo( size_words );
+    lastIndex = indexWordHi( size_words );
+    carry = 1;
+    for (;;) {
+        word = ~zPtr[index] + carry;
+        zPtr[index] = word;
+        if ( index == lastIndex ) break;
+        index += wordIncr;
+        if ( word ) carry = 0;
+    }
+
+}
+
+#endif
+
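softfloat_negXM above is plain two's-complement negation extended across words: complement every word and add a single initial carry, which keeps propagating only through words that come out zero. A small sketch of the identity on a 4-word value, verified against unsigned __int128; a fixed little-endian word order is assumed here, where the vendored code goes through the indexWord macros instead:

/* Sketch only: assumes unsigned __int128 and little-endian word order. */
#include <assert.h>
#include <stdint.h>

static void neg_words( uint32_t *z, int n )
{
    uint_fast8_t carry = 1;
    for ( int i = 0; i < n; ++i ) {  /* lowest word first */
        uint32_t word = ~z[i] + carry;
        z[i] = word;
        if ( word ) carry = 0;       /* carry survives only a zero word */
    }
}

int main( void )
{
    uint32_t w[4] = { 0x00000000, 0xFFFFFFFF, 0x12345678, 0x9ABCDEF0 };
    unsigned __int128 v = 0, neg = 0;
    int i;

    for ( i = 3; 0 <= i; --i ) v = v<<32 | w[i];
    neg_words( w, 4 );
    for ( i = 3; 0 <= i; --i ) neg = neg<<32 | w[i];
    assert( neg == (unsigned __int128) 0 - v );
    return 0;
}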
diff --git a/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c
new file mode 100644
index 00000000..148cb2c8
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c
@@ -0,0 +1,81 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+float128_t
+ softfloat_normRoundPackToF128(
+     bool sign, int_fast32_t exp, uint_fast64_t sig64, uint_fast64_t sig0 )
+{
+    int_fast8_t shiftDist;
+    struct uint128 sig128;
+    union ui128_f128 uZ;
+    uint_fast64_t sigExtra;
+    struct uint128_extra sig128Extra;
+
+    if ( ! sig64 ) {
+        exp -= 64;
+        sig64 = sig0;
+        sig0 = 0;
+    }
+    shiftDist = softfloat_countLeadingZeros64( sig64 ) - 15;
+    exp -= shiftDist;
+    if ( 0 <= shiftDist ) {
+        if ( shiftDist ) {
+            sig128 = softfloat_shortShiftLeft128( sig64, sig0, shiftDist );
+            sig64 = sig128.v64;
+            sig0 = sig128.v0;
+        }
+        if ( (uint32_t) exp < 0x7FFD ) {
+            uZ.ui.v64 = packToF128UI64( sign, sig64 | sig0 ? exp : 0, sig64 );
+            uZ.ui.v0 = sig0;
+            return uZ.f;
+        }
+        sigExtra = 0;
+    } else {
+        sig128Extra =
+            softfloat_shortShiftRightJam128Extra( sig64, sig0, 0, -shiftDist );
+        sig64 = sig128Extra.v.v64;
+        sig0 = sig128Extra.v.v0;
+        sigExtra = sig128Extra.extra;
+    }
+    return softfloat_roundPackToF128( sign, exp, sig64, sig0, sigExtra );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c
new file mode 100644
index 00000000..6788f2e1
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" + +float16_t + softfloat_normRoundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig ) +{ + int_fast8_t shiftDist; + union ui16_f16 uZ; + + shiftDist = softfloat_countLeadingZeros16( sig ) - 1; + exp -= shiftDist; + if ( (4 <= shiftDist) && ((unsigned int) exp < 0x1D) ) { + uZ.ui = packToF16UI( sign, sig ? exp : 0, sig<<(shiftDist - 4) ); + return uZ.f; + } else { + return softfloat_roundPackToF16( sign, exp, sig< +#include +#include "platform.h" +#include "internals.h" + +float32_t + softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + int_fast8_t shiftDist; + union ui32_f32 uZ; + + shiftDist = softfloat_countLeadingZeros32( sig ) - 1; + exp -= shiftDist; + if ( (7 <= shiftDist) && ((unsigned int) exp < 0xFD) ) { + uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<(shiftDist - 7) ); + return uZ.f; + } else { + return softfloat_roundPackToF32( sign, exp, sig< +#include +#include "platform.h" +#include "internals.h" + +float64_t + softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + int_fast8_t shiftDist; + union ui64_f64 uZ; + + shiftDist = softfloat_countLeadingZeros64( sig ) - 1; + exp -= shiftDist; + if ( (10 <= shiftDist) && ((unsigned int) exp < 0x7FD) ) { + uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<(shiftDist - 10) ); + return uZ.f; + } else { + return softfloat_roundPackToF64( sign, exp, sig< +#include "platform.h" +#include "internals.h" + +struct exp32_sig128 + softfloat_normSubnormalF128Sig( uint_fast64_t sig64, uint_fast64_t sig0 ) +{ + int_fast8_t shiftDist; + struct exp32_sig128 z; + + if ( ! sig64 ) { + shiftDist = softfloat_countLeadingZeros64( sig0 ) - 15; + z.exp = -63 - shiftDist; + if ( shiftDist < 0 ) { + z.sig.v64 = sig0>>-shiftDist; + z.sig.v0 = sig0<<(shiftDist & 63); + } else { + z.sig.v64 = sig0< +#include "platform.h" +#include "internals.h" + +struct exp8_sig16 softfloat_normSubnormalF16Sig( uint_fast16_t sig ) +{ + int_fast8_t shiftDist; + struct exp8_sig16 z; + + shiftDist = softfloat_countLeadingZeros16( sig ) - 5; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "internals.h" + +struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig ) +{ + int_fast8_t shiftDist; + struct exp16_sig32 z; + + shiftDist = softfloat_countLeadingZeros32( sig ) - 8; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "internals.h" + +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig ) +{ + int_fast8_t shiftDist; + struct exp16_sig64 z; + + shiftDist = softfloat_countLeadingZeros64( sig ) - 11; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "primitiveTypes.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting the unsigned integer formed from concatenating `uiA64' and +| `uiA0' as a 128-bit floating-point value, and likewise interpreting the +| unsigned integer formed from concatenating `uiB64' and `uiB0' as another +| 128-bit floating-point value, and assuming at least on of these floating- +| point values is a NaN, returns the bit pattern of the combined NaN result. +| If either original floating-point value is a signaling NaN, the invalid +| exception is raised. 
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_propagateNaNF128UI(
+     uint_fast64_t uiA64,
+     uint_fast64_t uiA0,
+     uint_fast64_t uiB64,
+     uint_fast64_t uiB0
+ )
+{
+    struct uint128 uiZ;
+
+    if (
+           softfloat_isSigNaNF128UI( uiA64, uiA0 )
+        || softfloat_isSigNaNF128UI( uiB64, uiB0 )
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    uiZ.v64 = defaultNaNF128UI64;
+    uiZ.v0  = defaultNaNF128UI0;
+    return uiZ;
+
+}
+
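Note the contract implemented throughout these propagateNaN routines in this RISC-V-targeted build: the inputs are inspected only to decide whether a signaling NaN must raise the invalid flag, and the returned pattern is always the canonical default NaN, so NaN payloads are never propagated. A self-contained sketch of the observable behaviour at 32 bits; the constant and the quiet-bit test are restated assumptions mirroring SoftFloat's RISC-V specialize.h, not taken from this diff:

/* Sketch only: constants restated for self-containment. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define defaultNaNF32UI 0x7FC00000  /* canonical quiet NaN */

static bool isSigNaNF32UI( uint32_t ui )
{
    /* exponent all ones, quiet bit clear, payload nonzero */
    return (((ui>>22) & 0x1FF) == 0x1FE) && (ui & 0x003FFFFF);
}

int main( void )
{
    uint32_t sNaN = 0x7F800001, qNaN = 0x7FC00123;
    assert( isSigNaNF32UI( sNaN ) && ! isSigNaNF32UI( qNaN ) );
    /* a propagateNaN call returns the same canonical pattern for either
       input; only a signaling input additionally raises invalid */
    assert( defaultNaNF32UI == 0x7FC00000 );
    return 0;
}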
diff --git a/vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c
new file mode 100644
index 00000000..3ecd4c98
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 16-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result.  If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast16_t
+ softfloat_propagateNaNF16UI( uint_fast16_t uiA, uint_fast16_t uiB )
+{
+
+    if ( softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    return defaultNaNF16UI;
+
+}
+
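The default NaN named by these routines has the same shape at every width: sign clear, exponent all ones, and only the top fraction bit (the quiet bit) set. A sketch, with the width-specific constants restated here as assumptions rather than quoted from this diff:

/* Sketch only: canonical quiet-NaN shape at 16, 32, and 64 bits. */
#include <assert.h>
#include <stdint.h>

int main( void )
{
    uint16_t dn16 = 0x7E00;
    uint32_t dn32 = 0x7FC00000;
    uint64_t dn64 = UINT64_C( 0x7FF8000000000000 );
    /* each is: exponent all ones, fraction MSB (the quiet bit) set */
    assert( dn16 == (uint16_t) (0x1Fu<<10 | 1u<<9) );
    assert( dn32 == (0xFFu<<23 | 1u<<22) );
    assert( dn64 == (UINT64_C( 0x7FF )<<52 | UINT64_C( 1 )<<51) );
    return 0;
}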
diff --git a/vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c
new file mode 100644
index 00000000..b97fa414
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result.  If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+
+    if ( softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    return defaultNaNF32UI;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c
new file mode 100644
index 00000000..9c2d3598
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result.  If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/ +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ) +{ + + if ( softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return defaultNaNF64UI; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c b/vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c new file mode 100644 index 00000000..fe787a43 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c @@ -0,0 +1,86 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_remStepMBy32 + +void + softfloat_remStepMBy32( + uint_fast8_t size_words, + const uint32_t *remPtr, + uint_fast8_t dist, + const uint32_t *bPtr, + uint32_t q, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint64_t dwordProd; + uint32_t wordRem, wordShiftedRem, wordProd; + uint_fast8_t uNegDist, borrow; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + dwordProd = (uint64_t) bPtr[index] * q; + wordRem = remPtr[index]; + wordShiftedRem = wordRem<>(uNegDist & 31); + index += wordIncr; + dwordProd = (uint64_t) bPtr[index] * q + (dwordProd>>32); + wordRem = remPtr[index]; + wordShiftedRem |= wordRem< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundMToI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + union { uint64_t ui; int64_t i; } uZ; + int64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c new file mode 100644 index 00000000..0377c5bb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundMToUI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c new file mode 100644 index 00000000..4d5efbb7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundPackMToI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + union { uint64_t ui; int64_t i; } uZ; + int64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c new file mode 100644 index 00000000..1a64fdf9 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundPackMToUI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c new file mode 100644 index 00000000..eaaa375c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c @@ -0,0 +1,171 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t + softfloat_roundPackToF128( + bool sign, + int_fast32_t exp, + uint_fast64_t sig64, + uint_fast64_t sig0, + uint_fast64_t sigExtra + ) +{ + uint_fast8_t roundingMode; + bool roundNearEven, doIncrement, isTiny; + struct uint128_extra sig128Extra; + uint_fast64_t uiZ64, uiZ0; + struct uint128 sig128; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x7FFD <= (uint32_t) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess + == softfloat_tininess_beforeRounding) + || (exp < -1) + || ! doIncrement + || softfloat_lt128( + sig64, + sig0, + UINT64_C( 0x0001FFFFFFFFFFFF ), + UINT64_C( 0xFFFFFFFFFFFFFFFF ) + ); + sig128Extra = + softfloat_shiftRightJam128Extra( sig64, sig0, sigExtra, -exp ); + sig64 = sig128Extra.v.v64; + sig0 = sig128Extra.v.v0; + sigExtra = sig128Extra.extra; + exp = 0; + if ( isTiny && sigExtra ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( + ! roundNearEven + && (roundingMode != softfloat_round_near_maxMag) + ) { + doIncrement = + (roundingMode + == (sign ? 
softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + } else if ( + (0x7FFD < exp) + || ((exp == 0x7FFD) + && softfloat_eq128( + sig64, + sig0, + UINT64_C( 0x0001FFFFFFFFFFFF ), + UINT64_C( 0xFFFFFFFFFFFFFFFF ) + ) + && doIncrement) + ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + if ( + roundNearEven + || (roundingMode == softfloat_round_near_maxMag) + || (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ) { + uiZ64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ0 = 0; + } else { + uiZ64 = + packToF128UI64( + sign, 0x7FFE, UINT64_C( 0x0000FFFFFFFFFFFF ) ); + uiZ0 = UINT64_C( 0xFFFFFFFFFFFFFFFF ); + } + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig0 |= 1; + goto packReturn; + } +#endif + } + if ( doIncrement ) { + sig128 = softfloat_add128( sig64, sig0, 0, 1 ); + sig64 = sig128.v64; + sig0 = + sig128.v0 + & ~(uint64_t) + (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } else { + if ( ! (sig64 | sig0) ) exp = 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ64 = packToF128UI64( sign, exp, sig64 ); + uiZ0 = sig0; + uiZ: + uZ.ui.v64 = uiZ64; + uZ.ui.v0 = uiZ0; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c new file mode 100644 index 00000000..0eaa73a5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t + softfloat_roundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + bool isTiny; + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x8; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xF + : 0; + } + roundBits = sig & 0xF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x1D <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x8000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0xF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0x1D < exp) || (0x8000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF16UI( sign, 0x1F, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>4; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast16_t) (! (roundBits ^ 8) & roundNearEven); + if ( ! 
sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF16UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c new file mode 100644 index 00000000..cc345085 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t + softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + bool isTiny; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 
0x7F + : 0; + } + roundBits = sig & 0x7F; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0xFD <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x80000000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0x7F; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0xFD < exp) || (0x80000000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>7; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven); + if ( ! sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF32UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c new file mode 100644 index 00000000..aaff008c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c @@ -0,0 +1,117 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t + softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + bool isTiny; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x200; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x3FF + : 0; + } + roundBits = sig & 0x3FF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x7FD <= (uint16_t) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) + || (sig + roundIncrement < UINT64_C( 0x8000000000000000 )); + sig = softfloat_shiftRightJam64( sig, -exp ); + exp = 0; + roundBits = sig & 0x3FF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( + (0x7FD < exp) + || (UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement) + ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>10; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast64_t) (! (roundBits ^ 0x200) & roundNearEven); + if ( ! 
sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF64UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c new file mode 100644 index 00000000..3ece8f05 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t + softfloat_roundPackToI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + uint_fast32_t sig32; + union { uint32_t ui; int32_t i; } uZ; + int_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x7F + : 0; + } + roundBits = sig & 0x7F; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid; + sig32 = sig>>7; + sig32 &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven); + uZ.ui = sign ? 
-sig32 : sig32; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i32_fromNegOverflow : i32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c new file mode 100644 index 00000000..ebef7f36 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundPackToI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + union { uint64_t ui; int64_t i; } uZ; + int_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + uZ.ui = sign ? 
-sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c new file mode 100644 index 00000000..f0021fe5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c @@ -0,0 +1,80 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + softfloat_roundPackToUI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x7F + : 0; + } + roundBits = sig & 0x7F; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid; + z = sig>>7; + z &= ~(uint_fast32_t) (! 
(roundBits ^ 0x40) & roundNearEven); + if ( sign && z ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c new file mode 100644 index 00000000..fada1840 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundPackToUI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! 
(sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToI32.c b/vendor/riscv-isa-sim/softfloat/s_roundToI32.c new file mode 100644 index 00000000..20a3ff4f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToI32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t + softfloat_roundToI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + uint_fast32_t sig32; + union { uint32_t ui; int32_t i; } uZ; + int_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x800; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xFFF + : 0; + } + roundBits = sig & 0xFFF; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid; + sig32 = sig>>12; + sig32 &= ~(uint_fast32_t) (! 
(roundBits ^ 0x800) & roundNearEven); + uZ.ui = sign ? -sig32 : sig32; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i32_fromNegOverflow : i32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToI64.c b/vendor/riscv-isa-sim/softfloat/s_roundToI64.c new file mode 100644 index 00000000..fcddbc27 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToI64.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundToI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + union { uint64_t ui; int64_t i; } uZ; + int_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! 
(sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToUI32.c b/vendor/riscv-isa-sim/softfloat/s_roundToUI32.c new file mode 100644 index 00000000..180899bd --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToUI32.c @@ -0,0 +1,80 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + softfloat_roundToUI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x800; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xFFF + : 0; + } + roundBits = sig & 0xFFF; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid; + z = sig>>12; + z &= ~(uint_fast32_t) (! 
(roundBits ^ 0x800) & roundNearEven); + if ( sign && z ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundToUI64.c new file mode 100644 index 00000000..de35b5eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToUI64.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundToUI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! 
(sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c new file mode 100644 index 00000000..8d2b91e8 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c @@ -0,0 +1,69 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shiftRightJam128 + +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ) +{ + uint_fast8_t u8NegDist; + struct uint128 z; + + if ( dist < 64 ) { + u8NegDist = -dist; + z.v64 = a64>>dist; + z.v0 = + a64<<(u8NegDist & 63) | a0>>dist + | ((uint64_t) (a0<<(u8NegDist & 63)) != 0); + } else { + z.v64 = 0; + z.v0 = + (dist < 127) + ? 
a64>>(dist & 63)
+                | (((a64 & (((uint_fast64_t) 1<<(dist & 63)) - 1)) | a0)
+                       != 0)
+                : ((a64 | a0) != 0);
+    }
+    return z;
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c
new file mode 100644
index 00000000..4e1293c7
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c
@@ -0,0 +1,77 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_shiftRightJam128Extra
+
+struct uint128_extra
+ softfloat_shiftRightJam128Extra(
+     uint64_t a64, uint64_t a0, uint64_t extra, uint_fast32_t dist )
+{
+    uint_fast8_t u8NegDist;
+    struct uint128_extra z;
+
+    u8NegDist = -dist;
+    if ( dist < 64 ) {
+        z.v.v64 = a64>>dist;
+        z.v.v0 = a64<<(u8NegDist & 63) | a0>>dist;
+        z.extra = a0<<(u8NegDist & 63);
+    } else {
+        z.v.v64 = 0;
+        if ( dist == 64 ) {
+            z.v.v0 = a64;
+            z.extra = a0;
+        } else {
+            extra |= a0;
+            if ( dist < 128 ) {
+                z.v.v0 = a64>>(dist & 63);
+                z.extra = a64<<(u8NegDist & 63);
+            } else {
+                z.v.v0 = 0;
+                z.extra = (dist == 128) ? a64 : (a64 != 0);
+            }
+        }
+    }
+    z.extra |= (extra != 0);
+    return z;
+
+}
+
+#endif
+
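The jam helpers above all follow one rule: every bit shifted out on the right is OR-ed ("jammed") into the least-significant bit of the result, so the later rounding step can still tell that the discarded tail was nonzero. A minimal self-contained sketch of that rule for a single 64-bit word; the name shift_right_jam64_sketch is illustrative, not part of the vendored sources, and it assumes 0 < dist < 64 as the short-shift helpers do:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative restatement of "shift right and jam": the result is
       a>>dist, with bit 0 forced to 1 whenever any of the dist discarded
       bits was nonzero. Requires 0 < dist < 64. */
    static uint64_t shift_right_jam64_sketch( uint64_t a, unsigned int dist )
    {
        uint64_t discarded = a & ((UINT64_C( 1 )<<dist) - 1);
        return a>>dist | (discarded != 0);
    }

    int main( void )
    {
        assert( shift_right_jam64_sketch( 0x20, 4 ) == 0x2 ); /* exact: no jam */
        assert( shift_right_jam64_sketch( 0x21, 4 ) == 0x3 ); /* inexact: jam  */
        return 0;
    }

The 128- and 256-bit variants apply the same rule across multiple words, while the *Extra variants keep the discarded bits in a separate "extra" word instead of collapsing them immediately, so only bits lost from the extra word itself need to be jammed.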
diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c
new file mode 100644
index 00000000..04cd1e50
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c
@@ -0,0 +1,126 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_shiftRightJam256M
+
+static
+ void
+ softfloat_shortShiftRightJamM(
+     uint_fast8_t size_words,
+     const uint64_t *aPtr,
+     uint_fast8_t dist,
+     uint64_t *zPtr
+ )
+{
+    uint_fast8_t uNegDist;
+    unsigned int index, lastIndex;
+    uint64_t partWordZ, wordA;
+
+    uNegDist = -dist;
+    index = indexWordLo( size_words );
+    lastIndex = indexWordHi( size_words );
+    wordA = aPtr[index];
+    partWordZ = wordA>>dist;
+    if ( partWordZ<<dist != wordA ) partWordZ |= 1;
+    while ( index != lastIndex ) {
+        wordA = aPtr[index + wordIncr];
+        zPtr[index] = wordA<<(uNegDist & 63) | partWordZ;
+        index += wordIncr;
+        partWordZ = wordA>>dist;
+    }
+    zPtr[index] = partWordZ;
+
+}
+
+void
+ softfloat_shiftRightJam256M(
+     const uint64_t *aPtr, uint_fast32_t dist, uint64_t *zPtr )
+{
+    uint64_t wordJam;
+    uint_fast32_t wordDist;
+    uint64_t *ptr;
+    uint_fast8_t i, innerDist;
+
+    wordJam = 0;
+    wordDist = dist>>6;
+    if ( wordDist ) {
+        if ( 4 < wordDist ) wordDist = 4;
+        ptr = (uint64_t *) (aPtr + indexMultiwordLo( 4, wordDist ));
+        i = wordDist;
+        do {
+            wordJam = *ptr++;
+            if ( wordJam ) break;
+            --i;
+        } while ( i );
+        ptr = zPtr;
+    }
+    if ( wordDist < 4 ) {
+        aPtr += indexMultiwordHiBut( 4, wordDist );
+        innerDist = dist & 63;
+        if ( innerDist ) {
+            softfloat_shortShiftRightJamM(
+                4 - wordDist,
+                aPtr,
+                innerDist,
+                zPtr + indexMultiwordLoBut( 4, wordDist )
+            );
+            if ( !
wordDist ) goto wordJam; + } else { + aPtr += indexWordLo( 4 - wordDist ); + ptr = zPtr + indexWordLo( 4 ); + for ( i = 4 - wordDist; i; --i ) { + *ptr = *aPtr; + aPtr += wordIncr; + ptr += wordIncr; + } + } + ptr = zPtr + indexMultiwordHi( 4, wordDist ); + } + do { + *ptr++ = 0; + --wordDist; + } while ( wordDist ); + wordJam: + if ( wordJam ) zPtr[indexWordLo( 4 )] |= 1; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c new file mode 100644 index 00000000..fbc3aa01 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_shiftRightJam32 + +uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist ) +{ + + return + (dist < 31) ? a>>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c new file mode 100644 index 00000000..34edd7bf --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_shiftRightJam64 + +uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist ) +{ + + return + (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c new file mode 100644 index 00000000..4d787122 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c @@ -0,0 +1,62 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shiftRightJam64Extra + +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ) +{ + struct uint64_extra z; + + if ( dist < 64 ) { + z.v = a>>dist; + z.extra = a<<(-dist & 63); + } else { + z.v = 0; + z.extra = (dist == 64) ? a : (a != 0); + } + z.extra |= (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c new file mode 100644 index 00000000..9b7c0672 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_shortShiftLeft128
+
+struct uint128
+ softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    struct uint128 z;
+
+    z.v64 = a64<<dist | a0>>(-dist & 63);
+    z.v0 = a0<<dist;
+    return z;
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft64To96M.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft64To96M.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft64To96M.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_shortShiftLeft64To96M
+
+void
+ softfloat_shortShiftLeft64To96M(
+     uint64_t a, uint_fast8_t dist, uint32_t *zPtr )
+{
+
+    zPtr[indexWord( 3, 0 )] = (uint32_t) a<<dist;
+    a >>= 32 - dist;
+    zPtr[indexWord( 3, 2 )] = a>>32;
+    zPtr[indexWord( 3, 1 )] = a;
+
+}
+
+#endif
+
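softfloat_shortShiftLeft64To96M above addresses its 96-bit result through index macros rather than fixed array offsets. Those macros, supplied by the vendored primitiveTypes.h/platform.h headers, abstract word order so the multiword routines work on either endianness: indexWordLo/indexWordHi name the least/most significant 32-bit word, and wordIncr steps from less to more significant. A hedged sketch of the little-endian expansions (the vendored headers select between this set and a mirrored big-endian set in which wordIncr is -1):

    /* Sketch of the little-endian variants of SoftFloat's word-index
       macros; not a copy of the vendored header, just the shape of it. */
    #define wordIncr 1
    #define indexWord( total, n ) (n)           /* n'th least-significant word */
    #define indexWordLo( total ) 0              /* least-significant word      */
    #define indexWordHi( total ) ((total) - 1)  /* most-significant word       */

Under definitions like these, the three stores in softfloat_shortShiftLeft64To96M write the low, high, and middle 32-bit words of the shifted value without ever hard-coding a memory layout.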
diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c
new file mode 100644
index 00000000..28c39bb2
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c
@@ -0,0 +1,55 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_shortShiftRight128
+
+struct uint128
+ softfloat_shortShiftRight128( uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    struct uint128 z;
+
+    z.v64 = a64>>dist;
+    z.v0 = a64<<(-dist & 63) | a0>>dist;
+    return z;
+
+}
+
+#endif
+
diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c
new file mode 100644
index 00000000..309188c3
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c
@@ -0,0 +1,73 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightExtendM + +void + softfloat_shortShiftRightExtendM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ) +{ + uint_fast8_t uNegDist; + unsigned int indexA, lastIndexA; + uint32_t partWordZ, wordA; + + uNegDist = -dist; + indexA = indexWordLo( size_words ); + lastIndexA = indexWordHi( size_words ); + zPtr += indexWordLo( size_words + 1 ); + partWordZ = 0; + for (;;) { + wordA = aPtr[indexA]; + *zPtr = wordA<<(uNegDist & 31) | partWordZ; + zPtr += wordIncr; + partWordZ = wordA>>dist; + if ( indexA == lastIndexA ) break; + indexA += wordIncr; + } + *zPtr = partWordZ; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c new file mode 100644 index 00000000..3eb0dd40 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightJam128 + +struct uint128 + softfloat_shortShiftRightJam128( + uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + uint_fast8_t uNegDist; + struct uint128 z; + + uNegDist = -dist; + z.v64 = a64>>dist; + z.v0 = + a64<<(uNegDist & 63) | a0>>dist + | ((uint64_t) (a0<<(uNegDist & 63)) != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c new file mode 100644 index 00000000..13692a0d --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightJam128Extra + +struct uint128_extra + softfloat_shortShiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast8_t dist ) +{ + uint_fast8_t uNegDist; + struct uint128_extra z; + + uNegDist = -dist; + z.v.v64 = a64>>dist; + z.v.v0 = a64<<(uNegDist & 63) | a0>>dist; + z.extra = a0<<(uNegDist & 63) | (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c new file mode 100644 index 00000000..7e93cd4f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c @@ -0,0 +1,50 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_shortShiftRightJam64 + +uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist ) +{ + + return a>>dist | ((a & (((uint_fast64_t) 1< +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightJam64Extra + +struct uint64_extra + softfloat_shortShiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast8_t dist ) +{ + struct uint64_extra z; + + z.v = a>>dist; + z.extra = a<<(-dist & 63) | (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c new file mode 100644 index 00000000..308ad59c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightM + +void + softfloat_shortShiftRightM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ) +{ + uint_fast8_t uNegDist; + unsigned int index, lastIndex; + uint32_t partWordZ, wordA; + + uNegDist = -dist; + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + partWordZ = aPtr[index]>>dist; + while ( index != lastIndex ) { + wordA = aPtr[index + wordIncr]; + zPtr[index] = wordA<<(uNegDist & 31) | partWordZ; + index += wordIncr; + partWordZ = wordA>>dist; + } + zPtr[index] = partWordZ; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_sub128.c b/vendor/riscv-isa-sim/softfloat/s_sub128.c new file mode 100644 index 00000000..ed86e100 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_sub128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_sub128 + +struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + + z.v0 = a0 - b0; + z.v64 = a64 - b64 - (a0 < b0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_sub1XM.c b/vendor/riscv-isa-sim/softfloat/s_sub1XM.c new file mode 100644 index 00000000..73773e5b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_sub1XM.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_sub1XM + +void softfloat_sub1XM( uint_fast8_t size_words, uint32_t *zPtr ) +{ + unsigned int index, lastIndex; + uint32_t wordA; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + for (;;) { + wordA = zPtr[index]; + zPtr[index] = wordA - 1; + if ( wordA || (index == lastIndex) ) break; + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_sub256M.c b/vendor/riscv-isa-sim/softfloat/s_sub256M.c new file mode 100644 index 00000000..c07b45ea --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_sub256M.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_sub256M + +void + softfloat_sub256M( + const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr ) +{ + unsigned int index; + uint_fast8_t borrow; + uint64_t wordA, wordB; + + index = indexWordLo( 4 ); + borrow = 0; + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + zPtr[index] = wordA - wordB - borrow; + if ( index == indexWordHi( 4 ) ) break; + borrow = borrow ? (wordA <= wordB) : (wordA < wordB); + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_subM.c b/vendor/riscv-isa-sim/softfloat/s_subM.c new file mode 100644 index 00000000..003f699f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_subM + +void + softfloat_subM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint_fast8_t borrow; + uint32_t wordA, wordB; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + borrow = 0; + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + zPtr[index] = wordA - wordB - borrow; + if ( index == lastIndex ) break; + borrow = borrow ? (wordA <= wordB) : (wordA < wordB); + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF128.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF128.c new file mode 100644 index 00000000..c4264d54 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF128.c @@ -0,0 +1,139 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float128_t
+ softfloat_subMagsF128(
+     uint_fast64_t uiA64,
+     uint_fast64_t uiA0,
+     uint_fast64_t uiB64,
+     uint_fast64_t uiB0,
+     bool signZ
+ )
+{
+    int_fast32_t expA;
+    struct uint128 sigA;
+    int_fast32_t expB;
+    struct uint128 sigB, sigZ;
+    int_fast32_t expDiff, expZ;
+    struct uint128 uiZ;
+    union ui128_f128 uZ;
+
+    expA = expF128UI64( uiA64 );
+    sigA.v64 = fracF128UI64( uiA64 );
+    sigA.v0 = uiA0;
+    expB = expF128UI64( uiB64 );
+    sigB.v64 = fracF128UI64( uiB64 );
+    sigB.v0 = uiB0;
+    sigA = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 4 );
+    sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 4 );
+    expDiff = expA - expB;
+    if ( 0 < expDiff ) goto expABigger;
+    if ( expDiff < 0 ) goto expBBigger;
+    if ( expA == 0x7FFF ) {
+        if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ.v64 = defaultNaNF128UI64;
+        uiZ.v0 = defaultNaNF128UI0;
+        goto uiZ;
+    }
+    expZ = expA;
+    if ( ! expZ ) expZ = 1;
+    if ( sigB.v64 < sigA.v64 ) goto aBigger;
+    if ( sigA.v64 < sigB.v64 ) goto bBigger;
+    if ( sigB.v0 < sigA.v0 ) goto aBigger;
+    if ( sigA.v0 < sigB.v0 ) goto bBigger;
+    uiZ.v64 =
+        packToF128UI64(
+            (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+    uiZ.v0 = 0;
+    goto uiZ;
+    expBBigger:
+    if ( expB == 0x7FFF ) {
+        if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
+        uiZ.v64 = packToF128UI64( signZ ^ 1, 0x7FFF, 0 );
+        uiZ.v0 = 0;
+        goto uiZ;
+    }
+    if ( expA ) {
+        sigA.v64 |= UINT64_C( 0x0010000000000000 );
+    } else {
+        ++expDiff;
+        if ( ! expDiff ) goto newlyAlignedBBigger;
+    }
+    sigA = softfloat_shiftRightJam128( sigA.v64, sigA.v0, -expDiff );
+    newlyAlignedBBigger:
+    expZ = expB;
+    sigB.v64 |= UINT64_C( 0x0010000000000000 );
+    bBigger:
+    signZ = ! signZ;
+    sigZ = softfloat_sub128( sigB.v64, sigB.v0, sigA.v64, sigA.v0 );
+    goto normRoundPack;
+    expABigger:
+    if ( expA == 0x7FFF ) {
+        if ( sigA.v64 | sigA.v0 ) goto propagateNaN;
+        uiZ.v64 = uiA64;
+        uiZ.v0 = uiA0;
+        goto uiZ;
+    }
+    if ( expB ) {
+        sigB.v64 |= UINT64_C( 0x0010000000000000 );
+    } else {
+        --expDiff;
+        if ( ! expDiff ) goto newlyAlignedABigger;
+    }
+    sigB = softfloat_shiftRightJam128( sigB.v64, sigB.v0, expDiff );
+    newlyAlignedABigger:
+    expZ = expA;
+    sigA.v64 |= UINT64_C( 0x0010000000000000 );
+    aBigger:
+    sigZ = softfloat_sub128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 );
+    normRoundPack:
+    return softfloat_normRoundPackToF128( signZ, expZ - 5, sigZ.v64, sigZ.v0 );
+    propagateNaN:
+    uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 );
+    uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
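softfloat_subMagsF128 above, like the narrower subMags routines that follow, subtracts magnitudes in three cases: equal exponents (a plain significand subtract, with count-leading-zeros renormalization absorbing any cancellation) and the two mixed cases, where the significand of the smaller-exponent operand is shifted right by the exponent difference, with jamming, before an ordinary integer subtract. A small worked example of the alignment step, using illustrative double-precision-style significands with the hidden bit placed at bit 52 (the constants are for exposition only, not taken from the vendored code):

    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        /* a = 1.5 = 1.1b * 2^0 and b = 0.75 = 1.1b * 2^-1 share the same
           significand pattern; their exponents differ by 1. */
        uint64_t sigA = UINT64_C( 0x0018000000000000 );
        uint64_t sigB = UINT64_C( 0x0018000000000000 );
        int expDiff = 1;

        /* Align b to a's exponent. No set bits are shifted out here, so
           no jam bit is needed and the subtraction is exact. */
        uint64_t sigZ = sigA - (sigB>>expDiff);

        assert( sigZ == UINT64_C( 0x000C000000000000 ) ); /* 0.75 = 1.1b * 2^-1 */
        return 0;
    }

When the alignment shift does discard set bits, the jammed least-significant bit is what lets the later rounding step distinguish an exactly representable difference from a truncated one.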
diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF16.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF16.c
new file mode 100644
index 00000000..5ec579e8
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF16.c
@@ -0,0 +1,187 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float16_t softfloat_subMagsF16( uint_fast16_t uiA, uint_fast16_t uiB )
+{
+    int_fast8_t expA;
+    uint_fast16_t sigA;
+    int_fast8_t expB;
+    uint_fast16_t sigB;
+    int_fast8_t expDiff;
+    uint_fast16_t uiZ;
+    int_fast16_t sigDiff;
+    bool signZ;
+    int_fast8_t shiftDist, expZ;
+    uint_fast16_t sigZ, sigX, sigY;
+    uint_fast32_t sig32Z;
+    int_fast8_t roundingMode;
+    union ui16_f16 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF16UI( uiA );
+    sigA = fracF16UI( uiA );
+    expB = expF16UI( uiB );
+    sigB = fracF16UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expA == 0x1F ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            uiZ = defaultNaNF16UI;
+            goto uiZ;
+        }
+        sigDiff = sigA - sigB;
+        if ( ! sigDiff ) {
+            uiZ =
+                packToF16UI(
+                    (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+            goto uiZ;
+        }
+        if ( expA ) --expA;
+        signZ = signF16UI( uiA );
+        if ( sigDiff < 0 ) {
+            signZ = ! signZ;
+            sigDiff = -sigDiff;
+        }
+        shiftDist = softfloat_countLeadingZeros16( sigDiff ) - 5;
+        expZ = expA - shiftDist;
+        if ( expZ < 0 ) {
+            shiftDist = expA;
+            expZ = 0;
+        }
+        sigZ = sigDiff<<shiftDist;
+        goto pack;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        signZ = signF16UI( uiA );
+        if ( expDiff < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            signZ = ! signZ;
+            if ( expB == 0x1F ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF16UI( signZ, 0x1F, 0 );
+                goto uiZ;
+            }
+            if ( expDiff <= -13 ) {
+                uiZ = packToF16UI( signZ, expB, sigB );
+                if ( expA | sigA ) goto subEpsilon;
+                goto uiZ;
+            }
+            expZ = expA + 19;
+            sigX = sigB | 0x0400;
+            sigY = sigA + (expA ? 0x0400 : sigA);
+            expDiff = -expDiff;
+        } else {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            uiZ = uiA;
+            if ( expA == 0x1F ) {
+                if ( sigA ) goto propagateNaN;
+                goto uiZ;
+            }
+            if ( 13 <= expDiff ) {
+                if ( expB | sigB ) goto subEpsilon;
+                goto uiZ;
+            }
+            expZ = expB + 19;
+            sigX = sigA | 0x0400;
+            sigY = sigB + (expB ? 0x0400 : sigB);
+        }
+        sig32Z = ((uint_fast32_t) sigX<<expDiff) - sigY;
+        shiftDist = softfloat_countLeadingZeros32( sig32Z ) - 1;
+        sig32Z <<= shiftDist;
+        expZ -= shiftDist;
+        sigZ = sig32Z>>16;
+        if ( sig32Z & 0xFFFF ) {
+            sigZ |= 1;
+        } else {
+            if ( !
(sigZ & 0xF) && ((unsigned int) expZ < 0x1E) ) { + sigZ >>= 4; + goto pack; + } + } + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + subEpsilon: + roundingMode = softfloat_roundingMode; + if ( roundingMode != softfloat_round_near_even ) { + if ( + (roundingMode == softfloat_round_minMag) + || (roundingMode + == (signF16UI( uiZ ) ? softfloat_round_max + : softfloat_round_min)) + ) { + --uiZ; + } +#ifdef SOFTFLOAT_ROUND_ODD + else if ( roundingMode == softfloat_round_odd ) { + uiZ = (uiZ - 1) | 1; + } +#endif + } + softfloat_exceptionFlags |= softfloat_flag_inexact; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + pack: + uiZ = packToF16UI( signZ, expZ, sigZ ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF32.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF32.c new file mode 100644 index 00000000..86e89f2e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF32.c @@ -0,0 +1,143 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + int_fast16_t expA; + uint_fast32_t sigA; + int_fast16_t expB; + uint_fast32_t sigB; + int_fast16_t expDiff; + uint_fast32_t uiZ; + int_fast32_t sigDiff; + bool signZ; + int_fast8_t shiftDist; + int_fast16_t expZ; + uint_fast32_t sigX, sigY; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF32UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + signZ = signF32UI( uiA ); + if ( sigDiff < 0 ) { + signZ = ! signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros32( sigDiff ) - 8; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF32UI( signZ, expZ, sigDiff<<shiftDist ); + goto uiZ; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigA <<= 7; + sigB <<= 7; + if ( expDiff < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + signZ = ! signF32UI( uiA ); + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF32UI( signZ, 0xFF, 0 ); + goto uiZ; + } + expZ = expB - 1; + sigX = sigB | 0x40000000; + sigY = sigA + (expA ? 0x40000000 : sigA); + expDiff = -expDiff; + } else { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + signZ = signF32UI( uiA ); + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA - 1; + sigX = sigA | 0x40000000; + sigY = sigB + (expB ? 0x40000000 : sigB); + } + return + softfloat_normRoundPackToF32( + signZ, expZ, sigX - softfloat_shiftRightJam32( sigY, expDiff ) ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF64.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF64.c new file mode 100644 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF64.c + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t + softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast64_t sigDiff; + int_fast8_t shiftDist; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + if ( sigDiff < 0 ) { + signZ = !
signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros64( sigDiff ) - 11; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF64UI( signZ, expZ, sigDiff<<shiftDist ); + goto uiZ; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigA <<= 10; + sigB <<= 10; + if ( expDiff < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + signZ = ! signZ; + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + } + sigA += expA ? UINT64_C( 0x4000000000000000 ) : sigA; + sigA = softfloat_shiftRightJam64( sigA, -expDiff ); + sigB |= UINT64_C( 0x4000000000000000 ); + expZ = expB; + sigZ = sigB - sigA; + } else { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + sigB += expB ? UINT64_C( 0x4000000000000000 ) : sigB; + sigB = softfloat_shiftRightJam64( sigB, expDiff ); + sigA |= UINT64_C( 0x4000000000000000 ); + expZ = expA; + sigZ = sigA - sigB; + } + return softfloat_normRoundPackToF64( signZ, expZ - 1, sigZ ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat.h b/vendor/riscv-isa-sim/softfloat/softfloat.h new file mode 100644 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat.h + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef softfloat_h +#define softfloat_h 1 + +#include <stdbool.h> +#include <stdint.h> +#include "softfloat_types.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*---------------------------------------------------------------------------- +| Software floating-point underflow tininess-detection mode. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_detectTininess; +enum { + softfloat_tininess_beforeRounding = 0, + softfloat_tininess_afterRounding = 1 +}; + +/*---------------------------------------------------------------------------- +| Software floating-point rounding mode. (Mode "odd" is supported only if +| SoftFloat is compiled with macro 'SOFTFLOAT_ROUND_ODD' defined.) +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_roundingMode; +enum { + softfloat_round_near_even = 0, + softfloat_round_minMag = 1, + softfloat_round_min = 2, + softfloat_round_max = 3, + softfloat_round_near_maxMag = 4, + softfloat_round_odd = 5 +}; + +/*---------------------------------------------------------------------------- +| Software floating-point exception flags. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags; +enum { + softfloat_flag_inexact = 1, + softfloat_flag_underflow = 2, + softfloat_flag_overflow = 4, + softfloat_flag_infinite = 8, + softfloat_flag_invalid = 16 +}; + +/*---------------------------------------------------------------------------- +| Routine to raise any or all of the software floating-point exception flags. +*----------------------------------------------------------------------------*/ +void softfloat_raiseFlags( uint_fast8_t ); + +/*---------------------------------------------------------------------------- +| Integer-to-floating-point conversion routines.
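The rounding mode and exception flags declared above are thread-local globals rather than per-call arguments, so a caller sets the mode, runs operations, and then inspects the sticky flags. A minimal sketch of that pattern (assuming the library is built and linked in the usual way; f32_add and the float32_t wrapper are declared further down in this header, and the bit patterns are standard IEEE binary32 encodings):

    /* Sketch: driving the thread-local control/status state declared above. */
    #include <stdio.h>
    #include "softfloat.h"

    int main( void )
    {
        float32_t a, b, sum;

        softfloat_roundingMode = softfloat_round_near_even;
        softfloat_exceptionFlags = 0;      /* clear the sticky flags first */

        a.v = 0x3F800000;                  /* 1.0f as a raw bit pattern */
        b.v = 0x33800000;                  /* 2^-24, half an ulp of 1.0f */
        sum = f32_add( a, b );             /* tie rounds to even: stays 1.0f */

        printf( "bits 0x%08X inexact %d\n", (unsigned) sum.v,
                (softfloat_exceptionFlags & softfloat_flag_inexact) != 0 );
        return 0;
    }

Because the result is rounded, the inexact flag is raised and stays set until the caller clears it again.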
+*----------------------------------------------------------------------------*/ +float16_t ui32_to_f16( uint32_t ); +float32_t ui32_to_f32( uint32_t ); +float64_t ui32_to_f64( uint32_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t ui32_to_extF80( uint32_t ); +float128_t ui32_to_f128( uint32_t ); +#endif +void ui32_to_extF80M( uint32_t, extFloat80_t * ); +void ui32_to_f128M( uint32_t, float128_t * ); +float16_t ui64_to_f16( uint64_t ); +float32_t ui64_to_f32( uint64_t ); +float64_t ui64_to_f64( uint64_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t ui64_to_extF80( uint64_t ); +float128_t ui64_to_f128( uint64_t ); +#endif +void ui64_to_extF80M( uint64_t, extFloat80_t * ); +void ui64_to_f128M( uint64_t, float128_t * ); +float16_t i32_to_f16( int32_t ); +float32_t i32_to_f32( int32_t ); +float64_t i32_to_f64( int32_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t i32_to_extF80( int32_t ); +float128_t i32_to_f128( int32_t ); +#endif +void i32_to_extF80M( int32_t, extFloat80_t * ); +void i32_to_f128M( int32_t, float128_t * ); +float16_t i64_to_f16( int64_t ); +float32_t i64_to_f32( int64_t ); +float64_t i64_to_f64( int64_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t i64_to_extF80( int64_t ); +float128_t i64_to_f128( int64_t ); +#endif +void i64_to_extF80M( int64_t, extFloat80_t * ); +void i64_to_f128M( int64_t, float128_t * ); + +/*---------------------------------------------------------------------------- +| 16-bit (half-precision) floating-point operations. +*----------------------------------------------------------------------------*/ +uint_fast8_t f16_to_ui8( float16_t, uint_fast8_t, bool ); +uint_fast16_t f16_to_ui16( float16_t, uint_fast8_t, bool ); +uint_fast32_t f16_to_ui32( float16_t, uint_fast8_t, bool ); +uint_fast64_t f16_to_ui64( float16_t, uint_fast8_t, bool ); +int_fast8_t f16_to_i8( float16_t, uint_fast8_t, bool ); +int_fast16_t f16_to_i16( float16_t, uint_fast8_t, bool ); +int_fast32_t f16_to_i32( float16_t, uint_fast8_t, bool ); +int_fast64_t f16_to_i64( float16_t, uint_fast8_t, bool ); +uint_fast32_t f16_to_ui32_r_minMag( float16_t, bool ); +uint_fast64_t f16_to_ui64_r_minMag( float16_t, bool ); +int_fast32_t f16_to_i32_r_minMag( float16_t, bool ); +int_fast64_t f16_to_i64_r_minMag( float16_t, bool ); +float32_t f16_to_f32( float16_t ); +float64_t f16_to_f64( float16_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t f16_to_extF80( float16_t ); +float128_t f16_to_f128( float16_t ); +#endif +void f16_to_extF80M( float16_t, extFloat80_t * ); +void f16_to_f128M( float16_t, float128_t * ); +float16_t f16_roundToInt( float16_t, uint_fast8_t, bool ); +float16_t f16_add( float16_t, float16_t ); +float16_t f16_sub( float16_t, float16_t ); +float16_t f16_max( float16_t, float16_t ); +float16_t f16_min( float16_t, float16_t ); +float16_t f16_mul( float16_t, float16_t ); +float16_t f16_mulAdd( float16_t, float16_t, float16_t ); +float16_t f16_div( float16_t, float16_t ); +float16_t f16_rem( float16_t, float16_t ); +float16_t f16_sqrt( float16_t ); +bool f16_eq( float16_t, float16_t ); +bool f16_le( float16_t, float16_t ); +bool f16_lt( float16_t, float16_t ); +bool f16_eq_signaling( float16_t, float16_t ); +bool f16_le_quiet( float16_t, float16_t ); +bool f16_lt_quiet( float16_t, float16_t ); +bool f16_isSignalingNaN( float16_t ); +uint_fast16_t f16_classify( float16_t ); +float16_t f16_rsqrte7( float16_t ); +float16_t f16_recip7( float16_t ); + +/*---------------------------------------------------------------------------- +| 32-bit (single-precision) floating-point operations. 
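As a quick illustration of the half-precision interface above, a sketch (assuming the library is linked; 0x3C00 is the binary16 encoding of 1.0, and widening to binary32 is always lossless):

    /* Sketch: 1.0 + 1.0 in binary16 is exact, then widened to binary32. */
    #include "softfloat.h"

    static float32_t half_two( void )
    {
        float16_t one, two;

        one.v = 0x3C00;               /* 1.0 in binary16 */
        two = f16_add( one, one );    /* 2.0, encoded as 0x4000 */
        return f16_to_f32( two );     /* 2.0f, encoded as 0x40000000 */
    }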
+*----------------------------------------------------------------------------*/ +uint_fast16_t f32_to_ui16( float32_t, uint_fast8_t, bool ); +uint_fast32_t f32_to_ui32( float32_t, uint_fast8_t, bool ); +uint_fast64_t f32_to_ui64( float32_t, uint_fast8_t, bool ); +int_fast16_t f32_to_i16( float32_t, uint_fast8_t, bool ); +int_fast32_t f32_to_i32( float32_t, uint_fast8_t, bool ); +int_fast64_t f32_to_i64( float32_t, uint_fast8_t, bool ); +uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool ); +uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool ); +int_fast32_t f32_to_i32_r_minMag( float32_t, bool ); +int_fast64_t f32_to_i64_r_minMag( float32_t, bool ); +float16_t f32_to_f16( float32_t ); +float64_t f32_to_f64( float32_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t f32_to_extF80( float32_t ); +float128_t f32_to_f128( float32_t ); +#endif +void f32_to_extF80M( float32_t, extFloat80_t * ); +void f32_to_f128M( float32_t, float128_t * ); +float32_t f32_roundToInt( float32_t, uint_fast8_t, bool ); +float32_t f32_add( float32_t, float32_t ); +float32_t f32_sub( float32_t, float32_t ); +float32_t f32_max( float32_t, float32_t ); +float32_t f32_min( float32_t, float32_t ); +float32_t f32_mul( float32_t, float32_t ); +float32_t f32_mulAdd( float32_t, float32_t, float32_t ); +float32_t f32_div( float32_t, float32_t ); +float32_t f32_rem( float32_t, float32_t ); +float32_t f32_sqrt( float32_t ); +bool f32_eq( float32_t, float32_t ); +bool f32_le( float32_t, float32_t ); +bool f32_lt( float32_t, float32_t ); +bool f32_eq_signaling( float32_t, float32_t ); +bool f32_le_quiet( float32_t, float32_t ); +bool f32_lt_quiet( float32_t, float32_t ); +bool f32_isSignalingNaN( float32_t ); +uint_fast16_t f32_classify( float32_t ); +float32_t f32_rsqrte7( float32_t ); +float32_t f32_recip7( float32_t ); + +/*---------------------------------------------------------------------------- +| 64-bit (double-precision) floating-point operations. 
+*----------------------------------------------------------------------------*/ +uint_fast32_t f64_to_ui32( float64_t, uint_fast8_t, bool ); +uint_fast64_t f64_to_ui64( float64_t, uint_fast8_t, bool ); +int_fast32_t f64_to_i32( float64_t, uint_fast8_t, bool ); +int_fast64_t f64_to_i64( float64_t, uint_fast8_t, bool ); +uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool ); +uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool ); +int_fast32_t f64_to_i32_r_minMag( float64_t, bool ); +int_fast64_t f64_to_i64_r_minMag( float64_t, bool ); +float16_t f64_to_f16( float64_t ); +float32_t f64_to_f32( float64_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t f64_to_extF80( float64_t ); +float128_t f64_to_f128( float64_t ); +#endif +void f64_to_extF80M( float64_t, extFloat80_t * ); +void f64_to_f128M( float64_t, float128_t * ); +float64_t f64_roundToInt( float64_t, uint_fast8_t, bool ); +float64_t f64_add( float64_t, float64_t ); +float64_t f64_sub( float64_t, float64_t ); +float64_t f64_max( float64_t, float64_t ); +float64_t f64_min( float64_t, float64_t ); +float64_t f64_mul( float64_t, float64_t ); +float64_t f64_mulAdd( float64_t, float64_t, float64_t ); +float64_t f64_div( float64_t, float64_t ); +float64_t f64_rem( float64_t, float64_t ); +float64_t f64_sqrt( float64_t ); +bool f64_eq( float64_t, float64_t ); +bool f64_le( float64_t, float64_t ); +bool f64_lt( float64_t, float64_t ); +bool f64_eq_signaling( float64_t, float64_t ); +bool f64_le_quiet( float64_t, float64_t ); +bool f64_lt_quiet( float64_t, float64_t ); +bool f64_isSignalingNaN( float64_t ); +uint_fast16_t f64_classify( float64_t ); +float64_t f64_rsqrte7( float64_t ); +float64_t f64_recip7( float64_t ); + +/*---------------------------------------------------------------------------- +| Rounding precision for 80-bit extended double-precision floating-point. +| Valid values are 32, 64, and 80. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t extF80_roundingPrecision; + +/*---------------------------------------------------------------------------- +| 80-bit extended double-precision floating-point operations. 
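One detail worth noting in the listing above: f64_mulAdd is a fused multiply-add, so a*b + c is rounded once; a separate multiply followed by an add rounds twice and can differ in the last bit. A sketch of the contrast:

    /* Sketch: fused vs. unfused a*b + c with the f64 API above. */
    #include "softfloat.h"

    static float64_t fused( float64_t a, float64_t b, float64_t c )
    {
        return f64_mulAdd( a, b, c );           /* one rounding */
    }

    static float64_t unfused( float64_t a, float64_t b, float64_t c )
    {
        return f64_add( f64_mul( a, b ), c );   /* two roundings */
    }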
+*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_INT64 +uint_fast32_t extF80_to_ui32( extFloat80_t, uint_fast8_t, bool ); +uint_fast64_t extF80_to_ui64( extFloat80_t, uint_fast8_t, bool ); +int_fast32_t extF80_to_i32( extFloat80_t, uint_fast8_t, bool ); +int_fast64_t extF80_to_i64( extFloat80_t, uint_fast8_t, bool ); +uint_fast32_t extF80_to_ui32_r_minMag( extFloat80_t, bool ); +uint_fast64_t extF80_to_ui64_r_minMag( extFloat80_t, bool ); +int_fast32_t extF80_to_i32_r_minMag( extFloat80_t, bool ); +int_fast64_t extF80_to_i64_r_minMag( extFloat80_t, bool ); +float16_t extF80_to_f16( extFloat80_t ); +float32_t extF80_to_f32( extFloat80_t ); +float64_t extF80_to_f64( extFloat80_t ); +float128_t extF80_to_f128( extFloat80_t ); +extFloat80_t extF80_roundToInt( extFloat80_t, uint_fast8_t, bool ); +extFloat80_t extF80_add( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_sub( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_mul( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_div( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_rem( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_sqrt( extFloat80_t ); +bool extF80_eq( extFloat80_t, extFloat80_t ); +bool extF80_le( extFloat80_t, extFloat80_t ); +bool extF80_lt( extFloat80_t, extFloat80_t ); +bool extF80_eq_signaling( extFloat80_t, extFloat80_t ); +bool extF80_le_quiet( extFloat80_t, extFloat80_t ); +bool extF80_lt_quiet( extFloat80_t, extFloat80_t ); +bool extF80_isSignalingNaN( extFloat80_t ); +#endif +uint_fast32_t extF80M_to_ui32( const extFloat80_t *, uint_fast8_t, bool ); +uint_fast64_t extF80M_to_ui64( const extFloat80_t *, uint_fast8_t, bool ); +int_fast32_t extF80M_to_i32( const extFloat80_t *, uint_fast8_t, bool ); +int_fast64_t extF80M_to_i64( const extFloat80_t *, uint_fast8_t, bool ); +uint_fast32_t extF80M_to_ui32_r_minMag( const extFloat80_t *, bool ); +uint_fast64_t extF80M_to_ui64_r_minMag( const extFloat80_t *, bool ); +int_fast32_t extF80M_to_i32_r_minMag( const extFloat80_t *, bool ); +int_fast64_t extF80M_to_i64_r_minMag( const extFloat80_t *, bool ); +float16_t extF80M_to_f16( const extFloat80_t * ); +float32_t extF80M_to_f32( const extFloat80_t * ); +float64_t extF80M_to_f64( const extFloat80_t * ); +void extF80M_to_f128M( const extFloat80_t *, float128_t * ); +void + extF80M_roundToInt( + const extFloat80_t *, uint_fast8_t, bool, extFloat80_t * ); +void extF80M_add( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_sub( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_mul( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_div( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_rem( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_sqrt( const extFloat80_t *, extFloat80_t * ); +bool extF80M_eq( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_le( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_lt( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_eq_signaling( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_le_quiet( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_lt_quiet( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_isSignalingNaN( const extFloat80_t * ); + +/*---------------------------------------------------------------------------- +| 128-bit (quadruple-precision) floating-point operations. 
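The extF80M_*/f128M_* functions above mirror the by-value API but pass operands and results through pointers, and they are the only 80-bit and 128-bit entry points compiled in when SOFTFLOAT_FAST_INT64 is not defined. A sketch using only functions declared in this header:

    /* Sketch: pointer-style 128-bit arithmetic. */
    #include "softfloat.h"

    static float128_t quad_sum( uint32_t x, uint32_t y )
    {
        float128_t a, b, z;

        ui32_to_f128M( x, &a );    /* exact: every uint32 fits in binary128 */
        ui32_to_f128M( y, &b );
        f128M_add( &a, &b, &z );
        return z;
    }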
+*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_INT64 +uint_fast32_t f128_to_ui32( float128_t, uint_fast8_t, bool ); +uint_fast64_t f128_to_ui64( float128_t, uint_fast8_t, bool ); +int_fast32_t f128_to_i32( float128_t, uint_fast8_t, bool ); +int_fast64_t f128_to_i64( float128_t, uint_fast8_t, bool ); +uint_fast32_t f128_to_ui32_r_minMag( float128_t, bool ); +uint_fast64_t f128_to_ui64_r_minMag( float128_t, bool ); +int_fast32_t f128_to_i32_r_minMag( float128_t, bool ); +int_fast64_t f128_to_i64_r_minMag( float128_t, bool ); +float16_t f128_to_f16( float128_t ); +float32_t f128_to_f32( float128_t ); +float64_t f128_to_f64( float128_t ); +extFloat80_t f128_to_extF80( float128_t ); +float128_t f128_roundToInt( float128_t, uint_fast8_t, bool ); +float128_t f128_add( float128_t, float128_t ); +float128_t f128_sub( float128_t, float128_t ); +float128_t f128_mul( float128_t, float128_t ); +float128_t f128_mulAdd( float128_t, float128_t, float128_t ); +float128_t f128_div( float128_t, float128_t ); +float128_t f128_rem( float128_t, float128_t ); +float128_t f128_sqrt( float128_t ); +bool f128_eq( float128_t, float128_t ); +bool f128_le( float128_t, float128_t ); +bool f128_lt( float128_t, float128_t ); +bool f128_eq_signaling( float128_t, float128_t ); +bool f128_le_quiet( float128_t, float128_t ); +bool f128_lt_quiet( float128_t, float128_t ); +bool f128_isSignalingNaN( float128_t ); +uint_fast16_t f128_classify( float128_t ); +#endif +uint_fast32_t f128M_to_ui32( const float128_t *, uint_fast8_t, bool ); +uint_fast64_t f128M_to_ui64( const float128_t *, uint_fast8_t, bool ); +int_fast32_t f128M_to_i32( const float128_t *, uint_fast8_t, bool ); +int_fast64_t f128M_to_i64( const float128_t *, uint_fast8_t, bool ); +uint_fast32_t f128M_to_ui32_r_minMag( const float128_t *, bool ); +uint_fast64_t f128M_to_ui64_r_minMag( const float128_t *, bool ); +int_fast32_t f128M_to_i32_r_minMag( const float128_t *, bool ); +int_fast64_t f128M_to_i64_r_minMag( const float128_t *, bool ); +float16_t f128M_to_f16( const float128_t * ); +float32_t f128M_to_f32( const float128_t * ); +float64_t f128M_to_f64( const float128_t * ); +void f128M_to_extF80M( const float128_t *, extFloat80_t * ); +void f128M_roundToInt( const float128_t *, uint_fast8_t, bool, float128_t * ); +void f128M_add( const float128_t *, const float128_t *, float128_t * ); +void f128M_sub( const float128_t *, const float128_t *, float128_t * ); +void f128M_mul( const float128_t *, const float128_t *, float128_t * ); +void + f128M_mulAdd( + const float128_t *, const float128_t *, const float128_t *, float128_t * + ); +void f128M_div( const float128_t *, const float128_t *, float128_t * ); +void f128M_rem( const float128_t *, const float128_t *, float128_t * ); +void f128M_sqrt( const float128_t *, float128_t * ); +bool f128M_eq( const float128_t *, const float128_t * ); +bool f128M_le( const float128_t *, const float128_t * ); +bool f128M_lt( const float128_t *, const float128_t * ); +bool f128M_eq_signaling( const float128_t *, const float128_t * ); +bool f128M_le_quiet( const float128_t *, const float128_t * ); +bool f128M_lt_quiet( const float128_t *, const float128_t * ); +bool f128M_isSignalingNaN( const float128_t * ); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat.mk.in b/vendor/riscv-isa-sim/softfloat/softfloat.mk.in new file mode 100644 index 00000000..a20ab7ee --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat.mk.in @@ -0,0 
+1,241 @@ +softfloat_subproject_deps = + +softfloat_hdrs = \ + internals.h \ + platform.h \ + primitives.h \ + primitiveTypes.h \ + softfloat.h \ + softfloat_types.h \ + specialize.h \ + +softfloat_c_srcs = \ + f128_add.c \ + f128_classify.c \ + f128_div.c \ + f128_eq.c \ + f128_eq_signaling.c \ + f128_isSignalingNaN.c \ + f128_le.c \ + f128_le_quiet.c \ + f128_lt.c \ + f128_lt_quiet.c \ + f128_mulAdd.c \ + f128_mul.c \ + f128_rem.c \ + f128_roundToInt.c \ + f128_sqrt.c \ + f128_sub.c \ + f128_to_f16.c \ + f128_to_f32.c \ + f128_to_f64.c \ + f128_to_i32.c \ + f128_to_i32_r_minMag.c \ + f128_to_i64.c \ + f128_to_i64_r_minMag.c \ + f128_to_ui32.c \ + f128_to_ui32_r_minMag.c \ + f128_to_ui64.c \ + f128_to_ui64_r_minMag.c \ + f16_add.c \ + f16_classify.c \ + f16_div.c \ + f16_eq.c \ + f16_eq_signaling.c \ + f16_isSignalingNaN.c \ + f16_le.c \ + f16_le_quiet.c \ + f16_lt.c \ + f16_lt_quiet.c \ + f16_mulAdd.c \ + f16_mul.c \ + f16_rem.c \ + f16_roundToInt.c \ + f16_sqrt.c \ + f16_sub.c \ + f16_to_f128.c \ + f16_to_f32.c \ + f16_to_f64.c \ + f16_to_i8.c \ + f16_to_i16.c \ + f16_to_i32.c \ + f16_to_i32_r_minMag.c \ + f16_to_i64.c \ + f16_to_i64_r_minMag.c \ + f16_to_ui8.c \ + f16_to_ui16.c \ + f16_to_ui32.c \ + f16_to_ui32_r_minMag.c \ + f16_to_ui64.c \ + f16_to_ui64_r_minMag.c \ + f32_add.c \ + f32_classify.c \ + f32_div.c \ + f32_eq.c \ + f32_eq_signaling.c \ + f32_isSignalingNaN.c \ + f32_le.c \ + f32_le_quiet.c \ + f32_lt.c \ + f32_lt_quiet.c \ + f32_mulAdd.c \ + f32_mul.c \ + f32_rem.c \ + f32_roundToInt.c \ + f32_sqrt.c \ + f32_sub.c \ + f32_to_f128.c \ + f32_to_f16.c \ + f32_to_f64.c \ + f32_to_i16.c \ + f32_to_i32.c \ + f32_to_i32_r_minMag.c \ + f32_to_i64.c \ + f32_to_i64_r_minMag.c \ + f32_to_ui16.c \ + f32_to_ui32.c \ + f32_to_ui32_r_minMag.c \ + f32_to_ui64.c \ + f32_to_ui64_r_minMag.c \ + f64_add.c \ + f64_classify.c \ + f64_div.c \ + f64_eq.c \ + f64_eq_signaling.c \ + f64_isSignalingNaN.c \ + f64_le.c \ + f64_le_quiet.c \ + f64_lt.c \ + f64_lt_quiet.c \ + f64_mulAdd.c \ + f64_mul.c \ + f64_rem.c \ + f64_roundToInt.c \ + f64_sqrt.c \ + f64_sub.c \ + f64_to_f128.c \ + f64_to_f16.c \ + f64_to_f32.c \ + f64_to_i32.c \ + f64_to_i32_r_minMag.c \ + f64_to_i64.c \ + f64_to_i64_r_minMag.c \ + f64_to_ui32.c \ + f64_to_ui32_r_minMag.c \ + f64_to_ui64.c \ + f64_to_ui64_r_minMag.c \ + fall_maxmin.c \ + fall_reciprocal.c \ + i32_to_f128.c \ + i32_to_f16.c \ + i32_to_f32.c \ + i32_to_f64.c \ + i64_to_f128.c \ + i64_to_f16.c \ + i64_to_f32.c \ + i64_to_f64.c \ + s_add128.c \ + s_add256M.c \ + s_addCarryM.c \ + s_addComplCarryM.c \ + s_addMagsF128.c \ + s_addMagsF16.c \ + s_addMagsF32.c \ + s_addMagsF64.c \ + s_addM.c \ + s_approxRecip_1Ks.c \ + s_approxRecip32_1.c \ + s_approxRecipSqrt_1Ks.c \ + s_approxRecipSqrt32_1.c \ + s_commonNaNToF32UI.c \ + s_commonNaNToF64UI.c \ + s_compare128M.c \ + s_compare96M.c \ + s_countLeadingZeros16.c \ + s_countLeadingZeros32.c \ + s_countLeadingZeros64.c \ + s_countLeadingZeros8.c \ + s_eq128.c \ + s_f32UIToCommonNaN.c \ + s_f64UIToCommonNaN.c \ + s_le128.c \ + s_lt128.c \ + s_mul128By32.c \ + s_mul128MTo256M.c \ + s_mul128To256M.c \ + s_mul64ByShifted32To128.c \ + s_mul64To128.c \ + s_mul64To128M.c \ + s_mulAddF128.c \ + s_mulAddF16.c \ + s_mulAddF32.c \ + s_mulAddF64.c \ + s_negXM.c \ + s_normRoundPackToF128.c \ + s_normRoundPackToF16.c \ + s_normRoundPackToF32.c \ + s_normRoundPackToF64.c \ + s_normSubnormalF128Sig.c \ + s_normSubnormalF16Sig.c \ + s_normSubnormalF32Sig.c \ + s_normSubnormalF64Sig.c \ + softfloat_raiseFlags.c \ + softfloat_state.c \ + 
s_propagateNaNF16UI.c \ + s_propagateNaNF32UI.c \ + s_propagateNaNF64UI.c \ + s_propagateNaNF128UI.c \ + s_remStepMBy32.c \ + s_roundMToI64.c \ + s_roundMToUI64.c \ + s_roundPackMToI64.c \ + s_roundPackMToUI64.c \ + s_roundPackToF128.c \ + s_roundPackToF16.c \ + s_roundPackToF32.c \ + s_roundPackToF64.c \ + s_roundPackToI32.c \ + s_roundPackToI64.c \ + s_roundPackToUI32.c \ + s_roundPackToUI64.c \ + s_roundToI32.c \ + s_roundToI64.c \ + s_roundToUI32.c \ + s_roundToUI64.c \ + s_shiftRightJam128.c \ + s_shiftRightJam128Extra.c \ + s_shiftRightJam256M.c \ + s_shiftRightJam32.c \ + s_shiftRightJam64.c \ + s_shiftRightJam64Extra.c \ + s_shortShiftLeft128.c \ + s_shortShiftLeft64To96M.c \ + s_shortShiftRight128.c \ + s_shortShiftRightExtendM.c \ + s_shortShiftRightJam128.c \ + s_shortShiftRightJam128Extra.c \ + s_shortShiftRightJam64.c \ + s_shortShiftRightJam64Extra.c \ + s_shortShiftRightM.c \ + s_sub128.c \ + s_sub1XM.c \ + s_sub256M.c \ + s_subMagsF128.c \ + s_subMagsF16.c \ + s_subMagsF32.c \ + s_subMagsF64.c \ + s_subM.c \ + ui32_to_f128.c \ + ui32_to_f16.c \ + ui32_to_f32.c \ + ui32_to_f64.c \ + ui64_to_f128.c \ + ui64_to_f16.c \ + ui64_to_f32.c \ + ui64_to_f64.c \ + +softfloat_install_shared_lib = yes + +softfloat_test_srcs = + +softfloat_install_prog_srcs = diff --git a/vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c b/vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c new file mode 100644 index 00000000..f2c25ade --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include "platform.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. 
Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `softfloat_exceptionFlags |= flags;'. +*----------------------------------------------------------------------------*/ +void softfloat_raiseFlags( uint_fast8_t flags ) +{ + + softfloat_exceptionFlags |= flags; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat_state.c b/vendor/riscv-isa-sim/softfloat/softfloat_state.c new file mode 100644 index 00000000..a105e6f6 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat_state.c @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +THREAD_LOCAL uint_fast8_t softfloat_roundingMode = softfloat_round_near_even; +THREAD_LOCAL uint_fast8_t softfloat_detectTininess = init_detectTininess; +THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags = 0; + +THREAD_LOCAL uint_fast8_t extF80_roundingPrecision = 80; + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat_types.h b/vendor/riscv-isa-sim/softfloat/softfloat_types.h new file mode 100644 index 00000000..af1888f9 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat_types.h @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef softfloat_types_h +#define softfloat_types_h 1 + +#include <stdint.h> + +/*---------------------------------------------------------------------------- +| Types used to pass 16-bit, 32-bit, 64-bit, and 128-bit floating-point +| arguments and results to/from functions. These types must be exactly +| 16 bits, 32 bits, 64 bits, and 128 bits in size, respectively. Where a +| platform has "native" support for IEEE-Standard floating-point formats, +| the types below may, if desired, be defined as aliases for the native types +| (typically 'float' and 'double', and possibly 'long double'). +*----------------------------------------------------------------------------*/ +typedef struct { uint16_t v; } float16_t; +typedef struct { uint32_t v; } float32_t; +typedef struct { uint64_t v; } float64_t; +typedef struct { uint64_t v[2]; } float128_t; + +/*---------------------------------------------------------------------------- +| The format of an 80-bit extended floating-point number in memory. This +| structure must contain a 16-bit field named 'signExp' and a 64-bit field +| named 'signif'. +*----------------------------------------------------------------------------*/ +#ifdef LITTLEENDIAN +struct extFloat80M { uint64_t signif; uint16_t signExp; }; +#else +struct extFloat80M { uint16_t signExp; uint64_t signif; }; +#endif + +/*---------------------------------------------------------------------------- +| The type used to pass 80-bit extended floating-point arguments and +| results to/from functions. This type must have size identical to +| 'struct extFloat80M'. Type 'extFloat80_t' can be defined as an alias for +| 'struct extFloat80M'. Alternatively, if a platform has "native" support +| for IEEE-Standard 80-bit extended floating-point, it may be possible, +| if desired, to define 'extFloat80_t' as an alias for the native type +| (presumably either 'long double' or a nonstandard compiler-intrinsic type).
+| In that case, the 'signif' and 'signExp' fields of 'struct extFloat80M' +| must align exactly with the locations in memory of the sign, exponent, and +| significand of the native type. +*----------------------------------------------------------------------------*/ +typedef struct extFloat80M extFloat80_t; + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/specialize.h b/vendor/riscv-isa-sim/softfloat/specialize.h new file mode 100644 index 00000000..556476c1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/specialize.h @@ -0,0 +1,429 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef specialize_h +#define specialize_h 1 + +#include <stdbool.h> +#include <stdint.h> +#include "primitiveTypes.h" +#include "softfloat.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*---------------------------------------------------------------------------- +| Default value for `softfloat_detectTininess'. +*----------------------------------------------------------------------------*/ +#define init_detectTininess softfloat_tininess_afterRounding + +/*---------------------------------------------------------------------------- +| The values to return on conversions to 8-bit, 16-bit, and 32-bit integer +| formats that raise an invalid exception.
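Because the wrapper structs defined in softfloat_types.h above carry raw IEEE-754 encodings, values can be built and compared bit-exactly without touching host floating point. A sketch (assuming standard binary64 encodings; 0x3FF0000000000000 encodes 1.0):

    /* Sketch: bit-exact construction and comparison via the .v field. */
    #include "softfloat.h"

    static int is_one( float64_t x )
    {
        return x.v == UINT64_C( 0x3FF0000000000000 );
    }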
+*----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + +#define ui32_fromPosOverflow 0xFFFFFFFF +#define ui32_fromNegOverflow 0 +#define ui32_fromNaN 0xFFFFFFFF +#define i32_fromPosOverflow 0x7FFFFFFF +#define i32_fromNegOverflow (-0x7FFFFFFF - 1) +#define i32_fromNaN 0x7FFFFFFF + +/*---------------------------------------------------------------------------- +| The values to return on conversions to 64-bit integer formats that raise an +| invalid exception. +*----------------------------------------------------------------------------*/ +#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF ) +#define ui64_fromNegOverflow 0 +#define ui64_fromNaN UINT64_C( 0xFFFFFFFFFFFFFFFF ) +#define i64_fromPosOverflow UINT64_C( 0x7FFFFFFFFFFFFFFF ) +#define i64_fromNegOverflow (-UINT64_C( 0x7FFFFFFFFFFFFFFF ) - 1) +#define i64_fromNaN UINT64_C( 0x7FFFFFFFFFFFFFFF ) + +/*---------------------------------------------------------------------------- +| "Common NaN" structure, used to transfer NaN representations from one format +| to another. +*----------------------------------------------------------------------------*/ +struct commonNaN { char _unused; }; + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 16-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF16UI 0x7E00 + +/*---------------------------------------------------------------------------- +| Returns true when 16-bit unsigned integer `uiA' has the bit pattern of a +| 16-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF16UI( uiA ) ((((uiA) & 0x7E00) == 0x7C00) && ((uiA) & 0x01FF)) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 16-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f16UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x0200) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#define softfloat_commonNaNToF16UI( aPtr ) ((uint_fast16_t) defaultNaNF16UI) + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 16-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. 
If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast16_t + softfloat_propagateNaNF16UI( uint_fast16_t uiA, uint_fast16_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 32-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF32UI 0x7FC00000 + +/*---------------------------------------------------------------------------- +| Returns true when 32-bit unsigned integer `uiA' has the bit pattern of a +| 32-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF32UI( uiA ) ((((uiA) & 0x7FC00000) == 0x7F800000) && ((uiA) & 0x003FFFFF)) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f32UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x00400000) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#define softfloat_commonNaNToF32UI( aPtr ) ((uint_fast32_t) defaultNaNF32UI) + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast32_t + softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 64-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF64UI UINT64_C( 0x7FF8000000000000 ) + +/*---------------------------------------------------------------------------- +| Returns true when 64-bit unsigned integer `uiA' has the bit pattern of a +| 64-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF64UI( uiA ) ((((uiA) & UINT64_C( 0x7FF8000000000000 )) == UINT64_C( 0x7FF0000000000000 )) && ((uiA) & UINT64_C( 0x0007FFFFFFFFFFFF ))) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. 
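As a concrete reading of the signaling-NaN predicate above (a sketch; specialize.h is an internal header, so ordinary clients would not usually include it): 0x7F800001 has an all-ones exponent, a clear quiet bit, and a nonzero payload, so the test accepts it, while the default NaN 0x7FC00000 is quiet. Note the macro evaluates its argument more than once, so it should only be given side-effect-free expressions.

    /* Sketch: exercising softfloat_isSigNaNF32UI with known bit patterns. */
    #include "specialize.h"

    static void nan_checks( void )
    {
        int sig   = softfloat_isSigNaNF32UI( 0x7F800001 );      /* 1 */
        int quiet = softfloat_isSigNaNF32UI( defaultNaNF32UI ); /* 0 */
        (void) sig; (void) quiet;
    }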
If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f64UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & UINT64_C( 0x0008000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#define softfloat_commonNaNToF64UI( aPtr ) ((uint_fast64_t) defaultNaNF64UI) + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 80-bit extended floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNExtF80UI64 0x7FFF +#define defaultNaNExtF80UI0 UINT64_C( 0xC000000000000000 ) + +/*---------------------------------------------------------------------------- +| Returns true when the 80-bit unsigned integer formed from concatenating +| 16-bit `uiA64' and 64-bit `uiA0' has the bit pattern of an 80-bit extended +| floating-point signaling NaN. +| Note: This macro evaluates its arguments more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNExtF80UI( uiA64, uiA0 ) ((((uiA64) & 0x7FFF) == 0x7FFF) && ! ((uiA0) & UINT64_C( 0x4000000000000000 )) && ((uiA0) & UINT64_C( 0x3FFFFFFFFFFFFFFF ))) + +#ifdef SOFTFLOAT_FAST_INT64 + +/*---------------------------------------------------------------------------- +| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is +| defined. +*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| Assuming the unsigned integer formed from concatenating `uiA64' and `uiA0' +| has the bit pattern of an 80-bit extended floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_extF80UIToCommonNaN( uiA64, uiA0, zPtr ) if ( ! ((uiA0) & UINT64_C( 0x4000000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into an 80-bit extended +| floating-point NaN, and returns the bit pattern of this value as an unsigned +| integer. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! 
defined softfloat_commonNaNToExtF80UI +INLINE +struct uint128 softfloat_commonNaNToExtF80UI( const struct commonNaN *aPtr ) +{ + struct uint128 uiZ; + uiZ.v64 = defaultNaNExtF80UI64; + uiZ.v0 = defaultNaNExtF80UI0; + return uiZ; +} +#else +struct uint128 softfloat_commonNaNToExtF80UI( const struct commonNaN *aPtr ); +#endif + +/*---------------------------------------------------------------------------- +| Interpreting the unsigned integer formed from concatenating `uiA64' and +| `uiA0' as an 80-bit extended floating-point value, and likewise interpreting +| the unsigned integer formed from concatenating `uiB64' and `uiB0' as another +| 80-bit extended floating-point value, and assuming at least one of these +| floating-point values is a NaN, returns the bit pattern of the combined NaN +| result. If either original floating-point value is a signaling NaN, the +| invalid exception is raised. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_propagateNaNExtF80UI( + uint_fast16_t uiA64, + uint_fast64_t uiA0, + uint_fast16_t uiB64, + uint_fast64_t uiB0 + ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 128-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF128UI64 UINT64_C( 0x7FFF800000000000 ) +#define defaultNaNF128UI0 UINT64_C( 0 ) + +/*---------------------------------------------------------------------------- +| Returns true when the 128-bit unsigned integer formed from concatenating +| 64-bit `uiA64' and 64-bit `uiA0' has the bit pattern of a 128-bit floating- +| point signaling NaN. +| Note: This macro evaluates its arguments more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF128UI( uiA64, uiA0 ) ((((uiA64) & UINT64_C( 0x7FFF800000000000 )) == UINT64_C( 0x7FFF000000000000 )) && ((uiA0) || ((uiA64) & UINT64_C( 0x00007FFFFFFFFFFF )))) + +/*---------------------------------------------------------------------------- +| Assuming the unsigned integer formed from concatenating `uiA64' and `uiA0' +| has the bit pattern of a 128-bit floating-point NaN, converts this NaN to +| the common NaN form, and stores the resulting common NaN at the location +| pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid exception +| is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f128UIToCommonNaN( uiA64, uiA0, zPtr ) if ( ! ((uiA64) & UINT64_C( 0x0000800000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#if defined INLINE && !
defined softfloat_commonNaNToF128UI +INLINE +struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN *aPtr ) +{ + struct uint128 uiZ; + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + return uiZ; +} +#else +struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN * ); +#endif + +/*---------------------------------------------------------------------------- +| Interpreting the unsigned integer formed from concatenating `uiA64' and +| `uiA0' as a 128-bit floating-point value, and likewise interpreting the +| unsigned integer formed from concatenating `uiB64' and `uiB0' as another +| 128-bit floating-point value, and assuming at least one of these floating- +| point values is a NaN, returns the bit pattern of the combined NaN result. +| If either original floating-point value is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_propagateNaNF128UI( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0 + ); + +#else + +/*---------------------------------------------------------------------------- +| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is not +| defined. +*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| Assuming the 80-bit extended floating-point value pointed to by `aSPtr' is +| a NaN, converts this NaN to the common NaN form, and stores the resulting +| common NaN at the location pointed to by `zPtr'. If the NaN is a signaling +| NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_extF80MToCommonNaN( aSPtr, zPtr ) if ( ! ((aSPtr)->signif & UINT64_C( 0x4000000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into an 80-bit extended +| floating-point NaN, and stores this NaN at the location pointed to by +| `zSPtr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! defined softfloat_commonNaNToExtF80M +INLINE +void + softfloat_commonNaNToExtF80M( + const struct commonNaN *aPtr, struct extFloat80M *zSPtr ) +{ + zSPtr->signExp = defaultNaNExtF80UI64; + zSPtr->signif = defaultNaNExtF80UI0; +} +#else +void + softfloat_commonNaNToExtF80M( + const struct commonNaN *aPtr, struct extFloat80M *zSPtr ); +#endif + +/*---------------------------------------------------------------------------- +| Assuming at least one of the two 80-bit extended floating-point values +| pointed to by `aSPtr' and `bSPtr' is a NaN, stores the combined NaN result +| at the location pointed to by `zSPtr'. If either original floating-point +| value is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +void + softfloat_propagateNaNExtF80M( + const struct extFloat80M *aSPtr, + const struct extFloat80M *bSPtr, + struct extFloat80M *zSPtr + ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 128-bit floating-point NaN.
+*----------------------------------------------------------------------------*/ +#define defaultNaNF128UI96 0x7FFF8000 +#define defaultNaNF128UI64 0 +#define defaultNaNF128UI32 0 +#define defaultNaNF128UI0 0 + +/*---------------------------------------------------------------------------- +| Assuming the 128-bit floating-point value pointed to by `aWPtr' is a NaN, +| converts this NaN to the common NaN form, and stores the resulting common +| NaN at the location pointed to by `zPtr'. If the NaN is a signaling NaN, +| the invalid exception is raised. Argument `aWPtr' points to an array of +| four 32-bit elements that concatenate in the platform's normal endian order +| to form a 128-bit floating-point value. +*----------------------------------------------------------------------------*/ +#define softfloat_f128MToCommonNaN( aWPtr, zPtr ) if ( ! ((aWPtr)[indexWordHi( 4 )] & UINT64_C( 0x0000800000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point +| NaN, and stores this NaN at the location pointed to by `zWPtr'. Argument +| `zWPtr' points to an array of four 32-bit elements that concatenate in the +| platform's normal endian order to form a 128-bit floating-point value. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! defined softfloat_commonNaNToF128M +INLINE +void + softfloat_commonNaNToF128M( const struct commonNaN *aPtr, uint32_t *zWPtr ) +{ + zWPtr[indexWord( 4, 3 )] = defaultNaNF128UI96; + zWPtr[indexWord( 4, 2 )] = defaultNaNF128UI64; + zWPtr[indexWord( 4, 1 )] = defaultNaNF128UI32; + zWPtr[indexWord( 4, 0 )] = defaultNaNF128UI0; +} +#else +void + softfloat_commonNaNToF128M( const struct commonNaN *aPtr, uint32_t *zWPtr ); +#endif + +/*---------------------------------------------------------------------------- +| Assuming at least one of the two 128-bit floating-point values pointed to by +| `aWPtr' and `bWPtr' is a NaN, stores the combined NaN result at the location +| pointed to by `zWPtr'. If either original floating-point value is a +| signaling NaN, the invalid exception is raised. Each of `aWPtr', `bWPtr', +| and `zWPtr' points to an array of four 32-bit elements that concatenate in +| the platform's normal endian order to form a 128-bit floating-point value. +*----------------------------------------------------------------------------*/ +void + softfloat_propagateNaNF128M( + const uint32_t *aWPtr, const uint32_t *bWPtr, uint32_t *zWPtr ); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/ui32_to_f128.c b/vendor/riscv-isa-sim/softfloat/ui32_to_f128.c new file mode 100644 index 00000000..78d3eb64 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/ui32_to_f128.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t ui32_to_f128( uint32_t a ) +{ + uint_fast64_t uiZ64; + int_fast8_t shiftDist; + union ui128_f128 uZ; + + uiZ64 = 0; + if ( a ) { + shiftDist = softfloat_countLeadingZeros32( a ) + 17; + uiZ64 = + packToF128UI64( + 0, 0x402E - shiftDist, (uint_fast64_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t ui32_to_f16( uint32_t a ) +{ + int_fast8_t shiftDist; + union ui16_f16 u; + uint_fast16_t sig; + + shiftDist = softfloat_countLeadingZeros32( a ) - 21; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF16UI( + 0, 0x18 - shiftDist, (uint_fast16_t) a<>(-shiftDist) | ((uint32_t) (a<<(shiftDist & 31)) != 0) + : (uint_fast16_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui32_to_f32( uint32_t a ) +{ + union ui32_f32 uZ; + + if ( ! a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & 0x80000000 ) { + return softfloat_roundPackToF32( 0, 0x9D, a>>1 | (a & 1) ); + } else { + return softfloat_normRoundPackToF32( 0, 0x9C, a ); + } + +} + diff --git a/vendor/riscv-isa-sim/softfloat/ui32_to_f64.c b/vendor/riscv-isa-sim/softfloat/ui32_to_f64.c new file mode 100644 index 00000000..5e5f843a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/ui32_to_f64.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui32_to_f64( uint32_t a ) +{ + uint_fast64_t uiZ; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + if ( ! a ) { + uiZ = 0; + } else { + shiftDist = softfloat_countLeadingZeros32( a ) + 21; + uiZ = + packToF64UI( 0, 0x432 - shiftDist, (uint_fast64_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t ui64_to_f128( uint64_t a ) +{ + uint_fast64_t uiZ64, uiZ0; + int_fast8_t shiftDist; + struct uint128 zSig; + union ui128_f128 uZ; + + if ( ! a ) { + uiZ64 = 0; + uiZ0 = 0; + } else { + shiftDist = softfloat_countLeadingZeros64( a ) + 49; + if ( 64 <= shiftDist ) { + zSig.v64 = a<<(shiftDist - 64); + zSig.v0 = 0; + } else { + zSig = softfloat_shortShiftLeft128( 0, a, shiftDist ); + } + uiZ64 = packToF128UI64( 0, 0x406E - shiftDist, zSig.v64 ); + uiZ0 = zSig.v0; + } + uZ.ui.v64 = uiZ64; + uZ.ui.v0 = uiZ0; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/ui64_to_f16.c b/vendor/riscv-isa-sim/softfloat/ui64_to_f16.c new file mode 100644 index 00000000..ecca02bc --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/ui64_to_f16.c @@ -0,0 +1,64 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t ui64_to_f16( uint64_t a ) +{ + int_fast8_t shiftDist; + union ui16_f16 u; + uint_fast16_t sig; + + shiftDist = softfloat_countLeadingZeros64( a ) - 53; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF16UI( + 0, 0x18 - shiftDist, (uint_fast16_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui64_to_f32( uint64_t a ) +{ + int_fast8_t shiftDist; + union ui32_f32 u; + uint_fast32_t sig; + + shiftDist = softfloat_countLeadingZeros64( a ) - 40; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF32UI( + 0, 0x95 - shiftDist, (uint_fast32_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui64_to_f64( uint64_t a ) +{ + union ui64_f64 uZ; + + if ( ! a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & UINT64_C( 0x8000000000000000 ) ) { + return + softfloat_roundPackToF64( + 0, 0x43D, softfloat_shortShiftRightJam64( a, 1 ) ); + } else { + return softfloat_normRoundPackToF64( 0, 0x43C, a ); + } + +} + diff --git a/vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc b/vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc new file mode 100644 index 00000000..c4fc840f --- /dev/null +++ b/vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc @@ -0,0 +1,70 @@ +// See LICENSE for license details. + +// This little program finds occurrences of strings like +// DASM(ffabc013) +// in its input, then replaces them with the disassembly +// enclosed hexadecimal number, interpreted as a RISC-V +// instruction. 
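+//
+// That is, each DASM(...) token is replaced in place by the disassembly of
+// the enclosed hex value. Values shorter than 64 bits are sign-extended by
+// the shift pair below (bits << (64 - nbits) >> (64 - nbits)); e.g.
+// DASM(ffabc013) is looked up as the 64-bit value 0xffffffffffabc013.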
+ +#include "disasm.h" +#include "extension.h" +#include +#include +#include +#include +using namespace std; + +int main(int argc, char** argv) +{ + string s; + const char* isa = DEFAULT_ISA; + + std::function extension; + option_parser_t parser; +#ifdef HAVE_DLOPEN + parser.option(0, "extension", 1, [&](const char* s){extension = find_extension(s);}); +#endif + parser.option(0, "isa", 1, [&](const char* s){isa = s;}); + parser.parse(argv); + + isa_parser_t isa_parser(isa, DEFAULT_PRIV); + disassembler_t* disassembler = new disassembler_t(&isa_parser); + if (extension) { + for (auto disasm_insn : extension()->get_disasms()) { + disassembler->add_insn(disasm_insn); + } + } + + while (getline(cin, s)) + { + for (size_t pos = 0; (pos = s.find("DASM(", pos)) != string::npos; ) + { + size_t start = pos; + + pos += strlen("DASM("); + + if (s[pos] == '0' && (s[pos+1] == 'x' || s[pos+1] == 'X')) + pos += 2; + + if (!isxdigit(s[pos])) + continue; + + char* endp; + int64_t bits = strtoull(&s[pos], &endp, 16); + if (*endp != ')') + continue; + + size_t nbits = 4 * (endp - &s[pos]); + if (nbits < 64) + bits = bits << (64 - nbits) >> (64 - nbits); + + string dis = disassembler->disassemble(bits); + s = s.substr(0, start) + dis + s.substr(endp - &s[0] + 1); + pos = start + dis.length(); + } + + cout << s << '\n'; + } + + return 0; +} diff --git a/vendor/riscv-isa-sim/spike_dasm/spike_dasm.ac b/vendor/riscv-isa-sim/spike_dasm/spike_dasm.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in b/vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in new file mode 100644 index 00000000..0233e62e --- /dev/null +++ b/vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in @@ -0,0 +1,10 @@ +spike_dasm_subproject_deps = \ + disasm \ + softfloat \ + $(if $(HAVE_DLOPEN),riscv,) \ + +spike_dasm_srcs = \ + spike_dasm_option_parser.cc \ + +spike_dasm_install_prog_srcs = \ + spike-dasm.cc \ diff --git a/vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc b/vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc new file mode 100644 index 00000000..72daec40 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc @@ -0,0 +1,51 @@ +// See LICENSE for license details. + +#include "option_parser.h" +#include +#include +#include +#include + +void option_parser_t::option(char c, const char* s, int arg, std::function action) +{ + opts.push_back(option_t(c, s, arg, action)); +} + +const char* const* option_parser_t::parse(const char* const* argv0) +{ + assert(argv0); + const char* const* argv = argv0 + 1; + for (const char* opt; (opt = *argv) != NULL && opt[0] == '-'; argv++) + { + bool found = false; + for (auto it = opts.begin(); !found && it != opts.end(); it++) + { + size_t slen = it->str ? strlen(it->str) : 0; + bool chr_match = opt[1] != '-' && it->chr && opt[1] == it->chr; + bool str_match = opt[1] == '-' && slen && strncmp(opt+2, it->str, slen) == 0; + if (chr_match || (str_match && (opt[2+slen] == '=' || opt[2+slen] == '\0'))) + { + const char* optarg = + chr_match ? (opt[2] ? &opt[2] : NULL) : + opt[2+slen] ? &opt[3+slen] : + it->arg ? 
*(++argv) : NULL; + if (optarg && !it->arg) + error("no argument allowed for option", *argv0, opt); + if (!optarg && it->arg) + error("argument required for option", *argv0, opt); + it->func(optarg); + found = true; + } + } + if (!found) + error("unrecognized option", *argv0, opt); + } + return argv; +} + +void option_parser_t::error(const char* msg, const char* argv0, const char* arg) +{ + fprintf(stderr, "%s: %s %s\n", argv0, msg, arg ? arg : ""); + if (helpmsg) helpmsg(); + exit(1); +} diff --git a/vendor/riscv-isa-sim/spike_main/spike-log-parser.cc b/vendor/riscv-isa-sim/spike_main/spike-log-parser.cc new file mode 100644 index 00000000..fab00f08 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/spike-log-parser.cc @@ -0,0 +1,61 @@ +// See LICENSE for license details. + +// This little program finds occurrences of strings like +// core 0: 0x000000008000c36c (0xfe843783) ld a5, -24(s0) +// in its inputs, then output the RISC-V instruction with the disassembly +// enclosed hexadecimal number. + +#include +#include +#include +#include +#include "fesvr/option_parser.h" + +#include "disasm.h" +#include "extension.h" + +using namespace std; + +int main(int argc, char** argv) +{ + string s; + const char* isa_string = DEFAULT_ISA; + + std::function extension; + option_parser_t parser; + parser.option(0, "extension", 1, [&](const char* s){extension = find_extension(s);}); + parser.option(0, "isa", 1, [&](const char* s){isa_string = s;}); + parser.parse(argv); + + isa_parser_t isa(isa_string, DEFAULT_PRIV); + processor_t p(&isa, DEFAULT_VARCH, 0, 0, false, nullptr, cerr); + if (extension) { + p.register_extension(extension()); + } + + std::regex reg("^core\\s+\\d+:\\s+0x[0-9a-f]+\\s+\\(0x([0-9a-f]+)\\)", std::regex_constants::icase); + std::smatch m; + std::ssub_match sm ; + + while (getline(cin,s)){ + if (regex_search(s, m, reg)){ + // the opcode string + string op = m[1].str(); + uint32_t bit_num = op.size() * 4; + uint64_t opcode = strtoull(op.c_str(), nullptr, 16); + + if (bit_num<64){ + opcode = opcode << (64-bit_num) >> (64-bit_num); + } + + const disasm_insn_t* disasm = p.get_disassembler()->lookup(opcode); + if (disasm) { + cout << disasm->get_name() << '\n'; + } else { + cout << "unknown_op\n"; + } + } + } + + return 0; +} diff --git a/vendor/riscv-isa-sim/spike_main/spike.cc b/vendor/riscv-isa-sim/spike_main/spike.cc new file mode 100644 index 00000000..55290452 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/spike.cc @@ -0,0 +1,530 @@ +// See LICENSE for license details. 
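+//
+// Illustrative invocation (assuming a riscv-pk `pk` and a target binary
+// named `hello` are available):
+//   spike -p2 -m1024 --isa=RV64IMAFDC pk hello
+// runs two harts with 1024 MiB of memory; see help() below for all options.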
+ +#include "cfg.h" +#include "sim.h" +#include "mmu.h" +#include "remote_bitbang.h" +#include "cachesim.h" +#include "extension.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "../VERSION" + +static void help(int exit_code = 1) +{ + fprintf(stderr, "Spike RISC-V ISA Simulator " SPIKE_VERSION "\n\n"); + fprintf(stderr, "usage: spike [host options] [target options]\n"); + fprintf(stderr, "Host Options:\n"); + fprintf(stderr, " -p Simulate processors [default 1]\n"); + fprintf(stderr, " -m Provide MiB of target memory [default 2048]\n"); + fprintf(stderr, " -m Provide memory regions of size m and n bytes\n"); + fprintf(stderr, " at base addresses a and b (with 4 KiB alignment)\n"); + fprintf(stderr, " -d Interactive debug mode\n"); + fprintf(stderr, " -g Track histogram of PCs\n"); + fprintf(stderr, " -l Generate a log of execution\n"); +#ifdef HAVE_BOOST_ASIO + fprintf(stderr, " -s Command I/O via socket (use with -d)\n"); +#endif + fprintf(stderr, " -h, --help Print this help message\n"); + fprintf(stderr, " -H Start halted, allowing a debugger to connect\n"); + fprintf(stderr, " --log= File name for option -l\n"); + fprintf(stderr, " --debug-cmd= Read commands from file (use with -d)\n"); + fprintf(stderr, " --isa= RISC-V ISA string [default %s]\n", DEFAULT_ISA); + fprintf(stderr, " --priv= RISC-V privilege modes supported [default %s]\n", DEFAULT_PRIV); + fprintf(stderr, " --varch= RISC-V Vector uArch string [default %s]\n", DEFAULT_VARCH); + fprintf(stderr, " --pc=
Override ELF entry point\n"); + fprintf(stderr, " --hartids= Explicitly specify hartids, default is 0,1,...\n"); + fprintf(stderr, " --ic=:: Instantiate a cache model with S sets,\n"); + fprintf(stderr, " --dc=:: W ways, and B-byte blocks (with S and\n"); + fprintf(stderr, " --l2=:: B both powers of 2).\n"); + fprintf(stderr, " --device= Attach MMIO plugin device from an --extlib library\n"); + fprintf(stderr, " P -- Name of the MMIO plugin\n"); + fprintf(stderr, " B -- Base memory address of the device\n"); + fprintf(stderr, " A -- String arguments to pass to the plugin\n"); + fprintf(stderr, " This flag can be used multiple times.\n"); + fprintf(stderr, " The extlib flag for the library must come first.\n"); + fprintf(stderr, " --log-cache-miss Generate a log of cache miss\n"); + fprintf(stderr, " --extension= Specify RoCC Extension\n"); + fprintf(stderr, " This flag can be used multiple times.\n"); + fprintf(stderr, " --extlib= Shared library to load\n"); + fprintf(stderr, " This flag can be used multiple times.\n"); + fprintf(stderr, " --rbb-port= Listen on for remote bitbang connection\n"); + fprintf(stderr, " --dump-dts Print device tree string and exit\n"); + fprintf(stderr, " --disable-dtb Don't write the device tree blob into memory\n"); + fprintf(stderr, " --kernel= Load kernel flat image into memory\n"); + fprintf(stderr, " --initrd= Load kernel initrd into memory\n"); + fprintf(stderr, " --bootargs= Provide custom bootargs for kernel [default: console=hvc0 earlycon=sbi]\n"); + fprintf(stderr, " --real-time-clint Increment clint time at real-time rate\n"); + fprintf(stderr, " --dm-progsize= Progsize for the debug module [default 2]\n"); + fprintf(stderr, " --dm-sba= Debug system bus access supports up to " + " wide accesses [default 0]\n"); + fprintf(stderr, " --dm-auth Debug module requires debugger to authenticate\n"); + fprintf(stderr, " --dmi-rti= Number of Run-Test/Idle cycles " + "required for a DMI access [default 0]\n"); + fprintf(stderr, " --dm-abstract-rti= Number of Run-Test/Idle cycles " + "required for an abstract command to execute [default 0]\n"); + fprintf(stderr, " --dm-no-hasel Debug module supports hasel\n"); + fprintf(stderr, " --dm-no-abstract-csr Debug module won't support abstract to authenticate\n"); + fprintf(stderr, " --dm-no-halt-groups Debug module won't support halt groups\n"); + fprintf(stderr, " --dm-no-impebreak Debug module won't support implicit ebreak in program buffer\n"); + fprintf(stderr, " --blocksz= Cache block size (B) for CMO operations(powers of 2) [default 64]\n"); + + exit(exit_code); +} + +static void suggest_help() +{ + fprintf(stderr, "Try 'spike --help' for more information.\n"); + exit(1); +} + +static bool check_file_exists(const char *fileName) +{ + std::ifstream infile(fileName); + return infile.good(); +} + +static std::ifstream::pos_type get_file_size(const char *filename) +{ + std::ifstream in(filename, std::ios::ate | std::ios::binary); + return in.tellg(); +} + +static void read_file_bytes(const char *filename,size_t fileoff, + mem_t* mem, size_t memoff, size_t read_sz) +{ + std::ifstream in(filename, std::ios::in | std::ios::binary); + in.seekg(fileoff, std::ios::beg); + + std::vector read_buf(read_sz, 0); + in.read(&read_buf[0], read_sz); + mem->store(memoff, read_sz, (uint8_t*)&read_buf[0]); +} + +bool sort_mem_region(const mem_cfg_t &a, const mem_cfg_t &b) +{ + if (a.base == b.base) + return (a.size < b.size); + else + return (a.base < b.base); +} + +void merge_overlapping_memory_regions(std::vector &mems) +{ + // 
check the user specified memory regions and merge the overlapping or + // eliminate the containing parts + assert(!mems.empty()); + + std::sort(mems.begin(), mems.end(), sort_mem_region); + for (auto it = mems.begin() + 1; it != mems.end(); ) { + reg_t start = prev(it)->base; + reg_t end = prev(it)->base + prev(it)->size; + reg_t start2 = it->base; + reg_t end2 = it->base + it->size; + + //contains -> remove + if (start2 >= start && end2 <= end) { + it = mems.erase(it); + //partial overlapped -> extend + } else if (start2 >= start && start2 < end) { + prev(it)->size = std::max(end, end2) - start; + it = mems.erase(it); + // no overlapping -> keep it + } else { + it++; + } + } +} + +static std::vector parse_mem_layout(const char* arg) +{ + std::vector res; + + // handle legacy mem argument + char* p; + auto mb = strtoull(arg, &p, 0); + if (*p == 0) { + reg_t size = reg_t(mb) << 20; + if (size != (size_t)size) + throw std::runtime_error("Size would overflow size_t"); + res.push_back(mem_cfg_t(reg_t(DRAM_BASE), size)); + return res; + } + + // handle base/size tuples + while (true) { + auto base = strtoull(arg, &p, 0); + if (!*p || *p != ':') + help(); + auto size = strtoull(p + 1, &p, 0); + + // page-align base and size + auto base0 = base, size0 = size; + size += base0 % PGSIZE; + base -= base0 % PGSIZE; + if (size % PGSIZE != 0) + size += PGSIZE - size % PGSIZE; + + if (base + size < base) + help(); + + if (size != size0) { + fprintf(stderr, "Warning: the memory at [0x%llX, 0x%llX] has been realigned\n" + "to the %ld KiB page size: [0x%llX, 0x%llX]\n", + base0, base0 + size0 - 1, long(PGSIZE / 1024), base, base + size - 1); + } + + res.push_back(mem_cfg_t(reg_t(base), reg_t(size))); + if (!*p) + break; + if (*p != ',') + help(); + arg = p + 1; + } + + merge_overlapping_memory_regions(res); + + return res; +} + +static std::vector> make_mems(const std::vector &layout) +{ + std::vector> mems; + mems.reserve(layout.size()); + for (const auto &cfg : layout) { + mems.push_back(std::make_pair(cfg.base, new mem_t(cfg.size))); + } + return mems; +} + +static unsigned long atoul_safe(const char* s) +{ + char* e; + auto res = strtoul(s, &e, 10); + if (*e) + help(); + return res; +} + +static unsigned long atoul_nonzero_safe(const char* s) +{ + auto res = atoul_safe(s); + if (!res) + help(); + return res; +} + +static std::vector parse_hartids(const char *s) +{ + std::string const str(s); + std::stringstream stream(str); + std::vector hartids; + + int n; + while (stream >> n) { + hartids.push_back(n); + if (stream.peek() == ',') stream.ignore(); + } + + return hartids; +} + +int main(int argc, char** argv) +{ + bool debug = false; + bool halted = false; + bool histogram = false; + bool log = false; + bool socket = false; // command line option -s + bool dump_dts = false; + bool dtb_enabled = true; + const char* kernel = NULL; + reg_t kernel_offset, kernel_size; + std::vector> plugin_devices; + std::unique_ptr ic; + std::unique_ptr dc; + std::unique_ptr l2; + bool log_cache = false; + bool log_commits = false; + const char *log_path = nullptr; + std::vector> extensions; + const char* initrd = NULL; + const char* dtb_file = NULL; + uint16_t rbb_port = 0; + bool use_rbb = false; + unsigned dmi_rti = 0; + reg_t blocksz = 64; + debug_module_config_t dm_config = { + .progbufsize = 2, + .max_sba_data_width = 0, + .require_authentication = false, + .abstract_rti = 0, + .support_hasel = true, + .support_abstract_csr_access = true, + .support_haltgroups = true, + .support_impebreak = true + }; + cfg_arg_t 
nprocs(1); + + cfg_t cfg(/*default_initrd_bounds=*/std::make_pair((reg_t)0, (reg_t)0), + /*default_bootargs=*/nullptr, + /*default_isa=*/DEFAULT_ISA, + /*default_priv=*/DEFAULT_PRIV, + /*default_varch=*/DEFAULT_VARCH, + /*default_mem_layout=*/parse_mem_layout("2048"), + /*default_hartids=*/std::vector(), + /*default_real_time_clint=*/false); + + auto const device_parser = [&plugin_devices](const char *s) { + const std::string str(s); + std::istringstream stream(str); + + // We are parsing a string like name,base,args. + + // Parse the name, which is simply all of the characters leading up to the + // first comma. The validity of the plugin name will be checked later. + std::string name; + std::getline(stream, name, ','); + if (name.empty()) { + throw std::runtime_error("Plugin name is empty."); + } + + // Parse the base address. First, get all of the characters up to the next + // comma (or up to the end of the string if there is no comma). Then try to + // parse that string as an integer according to the rules of strtoull. It + // could be in decimal, hex, or octal. Fail if we were able to parse a + // number but there were garbage characters after the valid number. We must + // consume the entire string between the commas. + std::string base_str; + std::getline(stream, base_str, ','); + if (base_str.empty()) { + throw std::runtime_error("Device base address is empty."); + } + char* end; + reg_t base = static_cast(strtoull(base_str.c_str(), &end, 0)); + if (end != &*base_str.cend()) { + throw std::runtime_error("Error parsing device base address."); + } + + // The remainder of the string is the arguments. We could use getline, but + // that could ignore newline characters in the arguments. That should be + // rare and discouraged, but handle it here anyway with this weird in_avail + // technique. The arguments are optional, so if there were no arguments + // specified we could end up with an empty string here. That's okay. + auto avail = stream.rdbuf()->in_avail(); + std::string args(avail, '\0'); + stream.readsome(&args[0], avail); + + plugin_devices.emplace_back(base, new mmio_plugin_device_t(name, args)); + }; + + option_parser_t parser; + parser.help(&suggest_help); + parser.option('h', "help", 0, [&](const char* s){help(0);}); + parser.option('d', 0, 0, [&](const char* s){debug = true;}); + parser.option('g', 0, 0, [&](const char* s){histogram = true;}); + parser.option('l', 0, 0, [&](const char* s){log = true;}); +#ifdef HAVE_BOOST_ASIO + parser.option('s', 0, 0, [&](const char* s){socket = true;}); +#endif + parser.option('p', 0, 1, [&](const char* s){nprocs = atoul_nonzero_safe(s);}); + parser.option('m', 0, 1, [&](const char* s){cfg.mem_layout = parse_mem_layout(s);}); + // I wanted to use --halted, but for some reason that doesn't work. 
+ parser.option('H', 0, 0, [&](const char* s){halted = true;}); + parser.option(0, "rbb-port", 1, [&](const char* s){use_rbb = true; rbb_port = atoul_safe(s);}); + parser.option(0, "pc", 1, [&](const char* s){cfg.start_pc = strtoull(s, 0, 0);}); + parser.option(0, "hartids", 1, [&](const char* s){ + cfg.hartids = parse_hartids(s); + cfg.explicit_hartids = true; + }); + parser.option(0, "ic", 1, [&](const char* s){ic.reset(new icache_sim_t(s));}); + parser.option(0, "dc", 1, [&](const char* s){dc.reset(new dcache_sim_t(s));}); + parser.option(0, "l2", 1, [&](const char* s){l2.reset(cache_sim_t::construct(s, "L2$"));}); + parser.option(0, "log-cache-miss", 0, [&](const char* s){log_cache = true;}); + parser.option(0, "isa", 1, [&](const char* s){cfg.isa = s;}); + parser.option(0, "priv", 1, [&](const char* s){cfg.priv = s;}); + parser.option(0, "varch", 1, [&](const char* s){cfg.varch = s;}); + parser.option(0, "device", 1, device_parser); + parser.option(0, "extension", 1, [&](const char* s){extensions.push_back(find_extension(s));}); + parser.option(0, "dump-dts", 0, [&](const char *s){dump_dts = true;}); + parser.option(0, "disable-dtb", 0, [&](const char *s){dtb_enabled = false;}); + parser.option(0, "dtb", 1, [&](const char *s){dtb_file = s;}); + parser.option(0, "kernel", 1, [&](const char* s){kernel = s;}); + parser.option(0, "initrd", 1, [&](const char* s){initrd = s;}); + parser.option(0, "bootargs", 1, [&](const char* s){cfg.bootargs = s;}); + parser.option(0, "real-time-clint", 0, [&](const char *s){cfg.real_time_clint = true;}); + parser.option(0, "extlib", 1, [&](const char *s){ + void *lib = dlopen(s, RTLD_NOW | RTLD_GLOBAL); + if (lib == NULL) { + fprintf(stderr, "Unable to load extlib '%s': %s\n", s, dlerror()); + exit(-1); + } + }); + parser.option(0, "dm-progsize", 1, + [&](const char* s){dm_config.progbufsize = atoul_safe(s);}); + parser.option(0, "dm-no-impebreak", 0, + [&](const char* s){dm_config.support_impebreak = false;}); + parser.option(0, "dm-sba", 1, + [&](const char* s){dm_config.max_sba_data_width = atoul_safe(s);}); + parser.option(0, "dm-auth", 0, + [&](const char* s){dm_config.require_authentication = true;}); + parser.option(0, "dmi-rti", 1, + [&](const char* s){dmi_rti = atoul_safe(s);}); + parser.option(0, "dm-abstract-rti", 1, + [&](const char* s){dm_config.abstract_rti = atoul_safe(s);}); + parser.option(0, "dm-no-hasel", 0, + [&](const char* s){dm_config.support_hasel = false;}); + parser.option(0, "dm-no-abstract-csr", 0, + [&](const char* s){dm_config.support_abstract_csr_access = false;}); + parser.option(0, "dm-no-halt-groups", 0, + [&](const char* s){dm_config.support_haltgroups = false;}); + parser.option(0, "log-commits", 0, + [&](const char* s){log_commits = true;}); + parser.option(0, "log", 1, + [&](const char* s){log_path = s;}); + FILE *cmd_file = NULL; + parser.option(0, "debug-cmd", 1, [&](const char* s){ + if ((cmd_file = fopen(s, "r"))==NULL) { + fprintf(stderr, "Unable to open command file '%s'\n", s); + exit(-1); + } + }); + parser.option(0, "blocksz", 1, [&](const char* s){ + blocksz = strtoull(s, 0, 0); + if (((blocksz & (blocksz - 1))) != 0) { + fprintf(stderr, "--blocksz should be power of 2\n"); + exit(-1); + } + }); + + auto argv1 = parser.parse(argv); + std::vector htif_args(argv1, (const char*const*)argv + argc); + + if (!*argv1) + help(); + + std::vector> mems = make_mems(cfg.mem_layout()); + + if (kernel && check_file_exists(kernel)) { + const char *isa = cfg.isa(); + kernel_size = get_file_size(kernel); + if (isa[2] == '6' 
&& isa[3] == '4') + kernel_offset = 0x200000; + else + kernel_offset = 0x400000; + for (auto& m : mems) { + if (kernel_size && (kernel_offset + kernel_size) < m.second->size()) { + read_file_bytes(kernel, 0, m.second, kernel_offset, kernel_size); + break; + } + } + } + + if (initrd && check_file_exists(initrd)) { + size_t initrd_size = get_file_size(initrd); + for (auto& m : mems) { + if (initrd_size && (initrd_size + 0x1000) < m.second->size()) { + reg_t initrd_end = m.first + m.second->size() - 0x1000; + reg_t initrd_start = initrd_end - initrd_size; + cfg.initrd_bounds = std::make_pair(initrd_start, initrd_end); + read_file_bytes(initrd, 0, m.second, initrd_start - m.first, initrd_size); + break; + } + } + } + +#ifdef HAVE_BOOST_ASIO + boost::asio::io_service *io_service_ptr = NULL; // needed for socket command interface option -s + boost::asio::ip::tcp::acceptor *acceptor_ptr = NULL; + if (socket) { // if command line option -s is set + try + { // create socket server + using boost::asio::ip::tcp; + io_service_ptr = new boost::asio::io_service; + acceptor_ptr = new tcp::acceptor(*io_service_ptr, tcp::endpoint(tcp::v4(), 0)); + // aceptor is created passing argument port=0, so O.S. will choose a free port + std::string name = boost::asio::ip::host_name(); + std::cout << "Listening for debug commands on " << name.substr(0,name.find('.')) + << " port " << acceptor_ptr->local_endpoint().port() << " ." << std::endl; + // at the end, add space and some other character for convenience of javascript .split(" ") + } + catch (std::exception& e) + { + std::cerr << e.what() << std::endl; + exit(-1); + } + } +#endif + + if (cfg.explicit_hartids) { + if (nprocs.overridden() && (nprocs() != cfg.nprocs())) { + std::cerr << "Number of specified hartids (" + << cfg.nprocs() + << ") doesn't match specified number of processors (" + << nprocs() << ").\n"; + exit(1); + } + } else { + // Set default set of hartids based on nprocs, but don't set the + // explicit_hartids flag (which means that downstream code can know that + // we've only set the number of harts, not explicitly chosen their IDs). 
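+    // For example, `-p4` with no --hartids option yields hartids {0, 1, 2, 3}.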
+ std::vector default_hartids; + default_hartids.reserve(nprocs()); + for (size_t i = 0; i < nprocs(); ++i) { + default_hartids.push_back(i); + } + cfg.hartids = default_hartids; + } + + sim_t s(&cfg, halted, + mems, plugin_devices, htif_args, dm_config, log_path, dtb_enabled, dtb_file, +#ifdef HAVE_BOOST_ASIO + io_service_ptr, acceptor_ptr, +#endif + cmd_file); + std::unique_ptr remote_bitbang((remote_bitbang_t *) NULL); + std::unique_ptr jtag_dtm( + new jtag_dtm_t(&s.debug_module, dmi_rti)); + if (use_rbb) { + remote_bitbang.reset(new remote_bitbang_t(rbb_port, &(*jtag_dtm))); + s.set_remote_bitbang(&(*remote_bitbang)); + } + + if (dump_dts) { + printf("%s", s.get_dts()); + return 0; + } + + if (ic && l2) ic->set_miss_handler(&*l2); + if (dc && l2) dc->set_miss_handler(&*l2); + if (ic) ic->set_log(log_cache); + if (dc) dc->set_log(log_cache); + for (size_t i = 0; i < cfg.nprocs(); i++) + { + if (ic) s.get_core(i)->get_mmu()->register_memtracer(&*ic); + if (dc) s.get_core(i)->get_mmu()->register_memtracer(&*dc); + for (auto e : extensions) + s.get_core(i)->register_extension(e()); + s.get_core(i)->get_mmu()->set_cache_blocksz(blocksz); + } + + s.set_debug(debug); + s.configure_log(log, log_commits); + s.set_histogram(histogram); + + auto return_code = s.run(); + + for (auto& mem : mems) + delete mem.second; + + for (auto& plugin_device : plugin_devices) + delete plugin_device.second; + + return return_code; +} diff --git a/vendor/riscv-isa-sim/spike_main/spike_main.ac b/vendor/riscv-isa-sim/spike_main/spike_main.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/spike_main/spike_main.mk.in b/vendor/riscv-isa-sim/spike_main/spike_main.mk.in new file mode 100644 index 00000000..35bef398 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/spike_main.mk.in @@ -0,0 +1,16 @@ +spike_main_subproject_deps = \ + fdt \ + fesvr \ + softfloat \ + disasm \ + riscv \ + +spike_main_install_prog_srcs = \ + spike.cc \ + spike-log-parser.cc \ + xspike.cc \ + termios-xspike.cc \ + +spike_main_hdrs = \ + +spike_main_srcs = \ diff --git a/vendor/riscv-isa-sim/spike_main/termios-xspike.cc b/vendor/riscv-isa-sim/spike_main/termios-xspike.cc new file mode 100644 index 00000000..e533933b --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/termios-xspike.cc @@ -0,0 +1,29 @@ +// See LICENSE for license details. + +// termios-xspike sets up a canonical terminal and blocks forever. +// It allows us to send Ctrl-C etc. to the target machine. + +#include +#include +#include +#include +#include +#include + +int main() +{ + struct termios old_tios; + if (tcgetattr(0, &old_tios) < 0) + return -1; + + signal(SIGTERM, [](int) { }); + + struct termios new_tios = old_tios; + new_tios.c_lflag &= ~(ICANON | ECHO | ISIG); + if (tcsetattr(0, TCSANOW, &new_tios) < 0) + return -1; + + pause(); + + return tcsetattr(0, TCSANOW, &old_tios); +} diff --git a/vendor/riscv-isa-sim/spike_main/xspike.cc b/vendor/riscv-isa-sim/spike_main/xspike.cc new file mode 100644 index 00000000..f8c8ca7e --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/xspike.cc @@ -0,0 +1,102 @@ +// See LICENSE for license details. + +// xspike forks an xterm for spike's target machine console, +// preserving the current terminal for debugging. 
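+//
+// How the console is wired up (see fork_xterm below): the child runs
+//   3>&1 xterm -title xspike -e sh -c 'tty 1>&3; termios-xspike'
+// so the xterm reports its tty name back through the pipe on fd 3; xspike
+// then opens that tty and dup2()s it onto spike's stdin and stdout.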
+ +#include +#include +#include +#include +#include +#include +#include +#include + +static pid_t fork_spike(int tty_fd, int argc, char** argv); +static pid_t fork_xterm(int* tty_fd); + +int main(int argc, char** argv) +{ + int tty_fd, wait_status, ret = -1; + pid_t xterm, spike; + + static bool signal_exit = false; + auto handle_signal = [](int) { signal_exit = true; }; + + if ((xterm = fork_xterm(&tty_fd)) < 0) + { + fprintf(stderr, "could not open xterm\n"); + goto out; + } + + signal(SIGINT, handle_signal); + + if ((spike = fork_spike(tty_fd, argc, argv)) < 0) + { + fprintf(stderr, "could not open spike\n"); + goto close_xterm; + } + + while ((ret = waitpid(spike, &wait_status, 0)) < 0) + if (signal_exit) + break; + + if (ret < 0) // signal_exit + kill(spike, SIGTERM); + else + ret = WIFEXITED(wait_status) ? WEXITSTATUS(wait_status) : -1; + +close_xterm: + kill(-xterm, SIGTERM); +out: + return ret; +} + +static pid_t fork_spike(int tty_fd, int argc, char** argv) +{ + pid_t pid = fork(); + if (pid < 0) + return -1; + + if (pid == 0) + { + if (dup2(tty_fd, STDIN_FILENO) < 0 || dup2(tty_fd, STDOUT_FILENO) < 0) + return -1; + execvp("spike", argv); + return -1; + } + + return pid; +} + +static pid_t fork_xterm(int* tty_fd) +{ + static const char cmd[] = "3>&1 xterm -title xspike -e sh -c 'tty 1>&3; termios-xspike'"; + + int fds[2]; + if (pipe(fds) < 0) + return -1; + + pid_t pid = fork(); + if (pid < 0) + return -1; + + if (pid == 0) + { + setpgid(0, 0); + if (dup2(fds[1], STDOUT_FILENO) < 0) + return -1; + execl("/bin/sh", "sh", "-c", cmd, NULL); + return -1; + } + + char tty[PATH_MAX]; + ssize_t ttylen = read(fds[0], tty, sizeof(tty)); + if (ttylen <= 1 || tty[ttylen-1] != '\n') + return -1; + tty[ttylen-1] = '\0'; + if ((*tty_fd = open(tty, O_RDWR)) < 0) + return -1; + + return pid; +} diff --git a/vendor/riscv-isa-sim/tests/ebreak.py b/vendor/riscv-isa-sim/tests/ebreak.py new file mode 100755 index 00000000..dd7e6587 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/ebreak.py @@ -0,0 +1,26 @@ +#!/usr/bin/python + +import os +import testlib +import unittest +import tempfile +import time + +class EbreakTest(unittest.TestCase): + def setUp(self): + self.binary = testlib.compile("ebreak.s") + + def test_noport(self): + """Make sure that we can run past ebreak when --gdb-port isn't used.""" + spike = testlib.Spike(self.binary, with_gdb=False, timeout=10) + result = spike.wait() + self.assertEqual(result, 0) + + def test_nogdb(self): + """Make sure that we can run past ebreak when gdb isn't attached.""" + spike = testlib.Spike(self.binary, timeout=10) + result = spike.wait() + self.assertEqual(result, 0) + +if __name__ == '__main__': + unittest.main() diff --git a/vendor/riscv-isa-sim/tests/ebreak.s b/vendor/riscv-isa-sim/tests/ebreak.s new file mode 100644 index 00000000..99f3e07c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/ebreak.s @@ -0,0 +1,5 @@ + .global main +main: + li a0, 0 + ebreak + ret diff --git a/vendor/riscv-isa-sim/tests/mseccfg/Makefile b/vendor/riscv-isa-sim/tests/mseccfg/Makefile new file mode 100644 index 00000000..2277410c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/Makefile @@ -0,0 +1,70 @@ +# Makefile for program model example + +XLEN ?= 32 +VLEN ?= 1024 +RISCV_TOOL ?= /home/saad/Downloads/lowrisc-toolchain-gcc-rv32imcb-20220524-1/bin/ +SPIKE_PATH ?= /home/saad/work/riscv-isa-sim/build +SAIL_EMULATOR_PATH = /home/scratch.soberl_maxwell/arch1/sail_2021/sail-riscv/c_emulator + +SSP_OPT ?= +PERF ?= 0 + +LIB_PATH = . 
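+# Typical flow: `make gen` generates the cases into gengen_src/outputs,
+# `make run` builds and runs them all, and `make run OBJECTS=<test>` runs a
+# single case (see DEV_TEST below for an example test name).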
+# ../ctests/nvrvv_lib.c +COMMON_FILES = \ + $(LIB_PATH)/crt.S \ + $(LIB_PATH)/syscalls.c + +TEST_PATH = ./gengen_src/outputs + +ALL_TEST ?= $(basename $(notdir $(wildcard $(TEST_PATH)/*.c))) +DEV_TEST = test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04 +OBJECTS ?= $(ALL_TEST) +# NEVER enable 'C' because pc + 4 is used in test code. +# -ffast-math -fno-common -fno-builtin-printf +CFLAGS = -march=rv$(XLEN)imafd -O2 -I . -I ./$(LIB_PATH) -I ../softfloat -I ../riscv \ + -fno-builtin-printf -fdata-sections -fno-section-anchors $(SSP_OPT) -DPRINTF_SUPPORTED=1 +LDFLAGS = -mcmodel=medany -static -nostdlib -nostartfiles -lm -lgcc \ + -T $(LIB_PATH)/mseccfg_test.ld -Wl,-M -Wl,-Map=link.log + +# must enable 'C', maybe used in pk +# 8M for TCM memories +# 16M for L2 memories +SIM_ISA = --isa=RV$(XLEN)IMAFDC + +default: + @echo "make gen, to generate all test cases with gengen" + @echo "make run, to run all test cases" + @echo "set OBJECTS variant to select specified test case" + +gen: + cd gengen_src; $(MAKE); $(MAKE) gen; + +$(OBJECTS): + @$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-gcc $(CFLAGS) $(TEST_PATH)/$@.c $(COMMON_FILES) $(LDFLAGS) -o a.out + @echo Running $(TEST_PATH)/$@.c - command - $(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-gcc $(CFLAGS) $(TEST_PATH)/$@.c $(COMMON_FILES) $(LDFLAGS) -o a.out + @$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-objdump -d a.out > a.ss + @$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-objdump --disassemble=target_foo a.out >> a.ss +ifeq ($(PERF), 0) + $(SPIKE_PATH)/spike $(SIM_ISA) -m0x100000:0x200000 a.out +# $(SAIL_EMULATOR_PATH)/riscv_sim_RV64 --enable-pmp -V a.out > tmp.log 2>&1; grep SUCCESS tmp.log +# @! grep FAILURE tmp.log +# $(RISCV_TOOL)/spike $(SIM_ISA) -l a.out > $@_pc.log 2>&1 +# sed -i '0,/ nop/d' $@_pc.log +# sed -i '/ nop/q' $@_pc.log +endif + +run: $(OBJECTS) + +clean: + rm *.s *.o *.i *.ss *.out *.log *.bin + +log: + $(SPIKE_PATH)/spike $(SIM_ISA) -m0x100000:0x200000 -l a.out > 1.log 2>&1 + $(SAIL_EMULATOR_PATH)/riscv_sim_RV64 --enable-pmp a.out > 2.log 2>&1 + +env: + echo $(ALL_TEST) + + +.PHONY: gen $(OBJECTS) clean diff --git a/vendor/riscv-isa-sim/tests/mseccfg/crt.S b/vendor/riscv-isa-sim/tests/mseccfg/crt.S new file mode 100644 index 00000000..bfbceae2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/crt.S @@ -0,0 +1,230 @@ +# See LICENSE for license details. 
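+#
+# LREG/SREG (and REGBYTES) below select lw/sw with 4-byte slots on RV32 and
+# ld/sd with 8-byte slots on RV64, so the trap handler's register save and
+# restore code is XLEN-portable.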
+ +#include "encoding.h" + +#if __riscv_xlen == 64 +# define LREG ld +# define SREG sd +# define REGBYTES 8 +#else +# define LREG lw +# define SREG sw +# define REGBYTES 4 +#endif + + .section ".text.init" + .globl _start +_start: + li x1, 0 + li x2, 0 + li x3, 0 + li x4, 0 + li x5, 0 + li x6, 0 + li x7, 0 + li x8, 0 + li x9, 0 + li x10,0 + li x11,0 + li x12,0 + li x13,0 + li x14,0 + li x15,0 + li x16,0 + li x17,0 + li x18,0 + li x19,0 + li x20,0 + li x21,0 + li x22,0 + li x23,0 + li x24,0 + li x25,0 + li x26,0 + li x27,0 + li x28,0 + li x29,0 + li x30,0 + li x31,0 + + # enable FPU and accelerator if present + li t0, MSTATUS_FS | MSTATUS_XS + csrs mstatus, t0 + + # make sure XLEN agrees with compilation choice + li t0, 1 + slli t0, t0, 31 +#if __riscv_xlen == 64 + bgez t0, 1f +#else + bltz t0, 1f +#endif +2: + li a0, 1 + sw a0, tohost, t0 + j 2b +1: + +#ifdef __riscv_flen + # initialize FPU if we have one + la t0, 1f + csrw mtvec, t0 + + fssr x0 + fmv.s.x f0, x0 + fmv.s.x f1, x0 + fmv.s.x f2, x0 + fmv.s.x f3, x0 + fmv.s.x f4, x0 + fmv.s.x f5, x0 + fmv.s.x f6, x0 + fmv.s.x f7, x0 + fmv.s.x f8, x0 + fmv.s.x f9, x0 + fmv.s.x f10,x0 + fmv.s.x f11,x0 + fmv.s.x f12,x0 + fmv.s.x f13,x0 + fmv.s.x f14,x0 + fmv.s.x f15,x0 + fmv.s.x f16,x0 + fmv.s.x f17,x0 + fmv.s.x f18,x0 + fmv.s.x f19,x0 + fmv.s.x f20,x0 + fmv.s.x f21,x0 + fmv.s.x f22,x0 + fmv.s.x f23,x0 + fmv.s.x f24,x0 + fmv.s.x f25,x0 + fmv.s.x f26,x0 + fmv.s.x f27,x0 + fmv.s.x f28,x0 + fmv.s.x f29,x0 + fmv.s.x f30,x0 + fmv.s.x f31,x0 +1: +#endif + + # initialize trap vector + la t0, trap_entry + csrw mtvec, t0 + + # initialize global pointer +.option push +.option norelax + la gp, __global_pointer$ +.option pop + + la tp, _end + 63 + and tp, tp, -64 + + # get core id + csrr a0, mhartid + # for now, assume only 1 core + li a1, 1 +1:bgeu a0, a1, 1b + + # give each core 128KB of stack + TLS +#define STKSHIFT 17 + add sp, a0, 1 + sll sp, sp, STKSHIFT + add sp, sp, tp + sll a2, a0, STKSHIFT + add tp, tp, a2 + + j _init + + .align 2 +trap_entry: + #addi sp, sp, -272 + # use shadow address + SREG sp, 31*REGBYTES(gp) + addi sp, gp, 512 + addi sp, gp, 512 + + SREG x1, 1*REGBYTES(sp) + SREG x2, 2*REGBYTES(sp) + SREG x3, 3*REGBYTES(sp) + SREG x4, 4*REGBYTES(sp) + SREG x5, 5*REGBYTES(sp) + SREG x6, 6*REGBYTES(sp) + SREG x7, 7*REGBYTES(sp) + SREG x8, 8*REGBYTES(sp) + SREG x9, 9*REGBYTES(sp) + SREG x10, 10*REGBYTES(sp) + SREG x11, 11*REGBYTES(sp) + SREG x12, 12*REGBYTES(sp) + SREG x13, 13*REGBYTES(sp) + SREG x14, 14*REGBYTES(sp) + SREG x15, 15*REGBYTES(sp) + SREG x16, 16*REGBYTES(sp) + SREG x17, 17*REGBYTES(sp) + SREG x18, 18*REGBYTES(sp) + SREG x19, 19*REGBYTES(sp) + SREG x20, 20*REGBYTES(sp) + SREG x21, 21*REGBYTES(sp) + SREG x22, 22*REGBYTES(sp) + SREG x23, 23*REGBYTES(sp) + SREG x24, 24*REGBYTES(sp) + SREG x25, 25*REGBYTES(sp) + SREG x26, 26*REGBYTES(sp) + SREG x27, 27*REGBYTES(sp) + SREG x28, 28*REGBYTES(sp) + SREG x29, 29*REGBYTES(sp) + SREG x30, 30*REGBYTES(sp) + SREG x31, 31*REGBYTES(sp) + + csrr a0, mcause + csrr a1, mepc + mv a2, sp + jal handle_trap + csrw mepc, a0 + + # Remain in M-mode after eret + #li t0, MSTATUS_MPP + #csrs mstatus, t0 + + LREG x1, 1*REGBYTES(sp) + LREG x2, 2*REGBYTES(sp) + LREG x3, 3*REGBYTES(sp) + LREG x4, 4*REGBYTES(sp) + LREG x5, 5*REGBYTES(sp) + LREG x6, 6*REGBYTES(sp) + LREG x7, 7*REGBYTES(sp) + LREG x8, 8*REGBYTES(sp) + LREG x9, 9*REGBYTES(sp) + LREG x10, 10*REGBYTES(sp) + LREG x11, 11*REGBYTES(sp) + LREG x12, 12*REGBYTES(sp) + LREG x13, 13*REGBYTES(sp) + LREG x14, 14*REGBYTES(sp) + LREG x15, 15*REGBYTES(sp) + LREG x16, 
16*REGBYTES(sp) + LREG x17, 17*REGBYTES(sp) + LREG x18, 18*REGBYTES(sp) + LREG x19, 19*REGBYTES(sp) + LREG x20, 20*REGBYTES(sp) + LREG x21, 21*REGBYTES(sp) + LREG x22, 22*REGBYTES(sp) + LREG x23, 23*REGBYTES(sp) + LREG x24, 24*REGBYTES(sp) + LREG x25, 25*REGBYTES(sp) + LREG x26, 26*REGBYTES(sp) + LREG x27, 27*REGBYTES(sp) + LREG x28, 28*REGBYTES(sp) + LREG x29, 29*REGBYTES(sp) + LREG x30, 30*REGBYTES(sp) + LREG x31, 31*REGBYTES(sp) + + #addi sp, sp, 272 + LREG sp, 31*REGBYTES(gp) + mret + +.section ".tohost","aw",@progbits +.align 6 +.globl tohost +tohost: .dword 0 +.align 6 +.globl fromhost +fromhost: .dword 0 diff --git a/vendor/riscv-isa-sim/tests/mseccfg/encoding.h b/vendor/riscv-isa-sim/tests/mseccfg/encoding.h new file mode 100644 index 00000000..e32f9580 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/encoding.h @@ -0,0 +1,1473 @@ +// See LICENSE for license details. + +#ifndef RISCV_CSR_ENCODING_H +#define RISCV_CSR_ENCODING_H + +#define MSTATUS_UIE 0x00000001 +#define MSTATUS_SIE 0x00000002 +#define MSTATUS_HIE 0x00000004 +#define MSTATUS_MIE 0x00000008 +#define MSTATUS_UPIE 0x00000010 +#define MSTATUS_SPIE 0x00000020 +#define MSTATUS_HPIE 0x00000040 +#define MSTATUS_MPIE 0x00000080 +#define MSTATUS_SPP 0x00000100 +#define MSTATUS_HPP 0x00000600 +#define MSTATUS_MPP 0x00001800 +#define MSTATUS_FS 0x00006000 +#define MSTATUS_XS 0x00018000 +#define MSTATUS_MPRV 0x00020000 +#define MSTATUS_SUM 0x00040000 +#define MSTATUS_MXR 0x00080000 +#define MSTATUS_TVM 0x00100000 +#define MSTATUS_TW 0x00200000 +#define MSTATUS_TSR 0x00400000 +#define MSTATUS_VS 0x01800000 +#define MSTATUS32_SD 0x80000000 +#define MSTATUS_UXL 0x0000000300000000 +#define MSTATUS_SXL 0x0000000C00000000 +#define MSTATUS64_SD 0x8000000000000000 + +#define SSTATUS_UIE 0x00000001 +#define SSTATUS_SIE 0x00000002 +#define SSTATUS_UPIE 0x00000010 +#define SSTATUS_SPIE 0x00000020 +#define SSTATUS_SPP 0x00000100 +#define SSTATUS_FS 0x00006000 +#define SSTATUS_XS 0x00018000 +#define SSTATUS_SUM 0x00040000 +#define SSTATUS_MXR 0x00080000 +#define SSTATUS_VS 0x01800000 +#define SSTATUS32_SD 0x80000000 +#define SSTATUS_UXL 0x0000000300000000 +#define SSTATUS64_SD 0x8000000000000000 + +#define DCSR_XDEBUGVER (3U<<30) +#define DCSR_NDRESET (1<<29) +#define DCSR_FULLRESET (1<<28) +#define DCSR_EBREAKM (1<<15) +#define DCSR_EBREAKH (1<<14) +#define DCSR_EBREAKS (1<<13) +#define DCSR_EBREAKU (1<<12) +#define DCSR_STOPCYCLE (1<<10) +#define DCSR_STOPTIME (1<<9) +#define DCSR_CAUSE (7<<6) +#define DCSR_DEBUGINT (1<<5) +#define DCSR_HALT (1<<3) +#define DCSR_STEP (1<<2) +#define DCSR_PRV (3<<0) + +#define DCSR_CAUSE_NONE 0 +#define DCSR_CAUSE_SWBP 1 +#define DCSR_CAUSE_HWBP 2 +#define DCSR_CAUSE_DEBUGINT 3 +#define DCSR_CAUSE_STEP 4 +#define DCSR_CAUSE_HALT 5 + +#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4)) +#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5)) +#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11)) + +#define MCONTROL_SELECT (1<<19) +#define MCONTROL_TIMING (1<<18) +#define MCONTROL_ACTION (0x3f<<12) +#define MCONTROL_CHAIN (1<<11) +#define MCONTROL_MATCH (0xf<<7) +#define MCONTROL_M (1<<6) +#define MCONTROL_H (1<<5) +#define MCONTROL_S (1<<4) +#define MCONTROL_U (1<<3) +#define MCONTROL_EXECUTE (1<<2) +#define MCONTROL_STORE (1<<1) +#define MCONTROL_LOAD (1<<0) + +#define MCONTROL_TYPE_NONE 0 +#define MCONTROL_TYPE_MATCH 2 + +#define MCONTROL_ACTION_DEBUG_EXCEPTION 0 +#define MCONTROL_ACTION_DEBUG_MODE 1 +#define MCONTROL_ACTION_TRACE_START 2 +#define MCONTROL_ACTION_TRACE_STOP 3 +#define MCONTROL_ACTION_TRACE_EMIT 
4 + +#define MCONTROL_MATCH_EQUAL 0 +#define MCONTROL_MATCH_NAPOT 1 +#define MCONTROL_MATCH_GE 2 +#define MCONTROL_MATCH_LT 3 +#define MCONTROL_MATCH_MASK_LOW 4 +#define MCONTROL_MATCH_MASK_HIGH 5 + +#define MIP_SSIP (1 << IRQ_S_SOFT) +#define MIP_HSIP (1 << IRQ_H_SOFT) +#define MIP_MSIP (1 << IRQ_M_SOFT) +#define MIP_STIP (1 << IRQ_S_TIMER) +#define MIP_HTIP (1 << IRQ_H_TIMER) +#define MIP_MTIP (1 << IRQ_M_TIMER) +#define MIP_SEIP (1 << IRQ_S_EXT) +#define MIP_HEIP (1 << IRQ_H_EXT) +#define MIP_MEIP (1 << IRQ_M_EXT) + +#define SIP_SSIP MIP_SSIP +#define SIP_STIP MIP_STIP + +#define PRV_U 0 +#define PRV_S 1 +#define PRV_H 2 +#define PRV_M 3 + +#define SATP32_MODE 0x80000000 +#define SATP32_ASID 0x7FC00000 +#define SATP32_PPN 0x003FFFFF +#define SATP64_MODE 0xF000000000000000 +#define SATP64_ASID 0x0FFFF00000000000 +#define SATP64_PPN 0x00000FFFFFFFFFFF + +#define SATP_MODE_OFF 0 +#define SATP_MODE_SV32 1 +#define SATP_MODE_SV39 8 +#define SATP_MODE_SV48 9 +#define SATP_MODE_SV57 10 +#define SATP_MODE_SV64 11 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define IRQ_S_SOFT 1 +#define IRQ_H_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_S_TIMER 5 +#define IRQ_H_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_S_EXT 9 +#define IRQ_H_EXT 10 +#define IRQ_M_EXT 11 +#define IRQ_COP 12 +#define IRQ_HOST 13 + +#define DEFAULT_RSTVEC 0x00001000 +#define CLINT_BASE 0x02000000 +#define CLINT_SIZE 0x000c0000 +#define EXT_IO_BASE 0x40000000 +#define DRAM_BASE 0x80000000 + +// page table entry (PTE) fields +#define PTE_V 0x001 // Valid +#define PTE_R 0x002 // Read +#define PTE_W 0x004 // Write +#define PTE_X 0x008 // Execute +#define PTE_U 0x010 // User +#define PTE_G 0x020 // Global +#define PTE_A 0x040 // Accessed +#define PTE_D 0x080 // Dirty +#define PTE_SOFT 0x300 // Reserved for Software + +#define PTE_PPN_SHIFT 10 + +#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V) + +#ifdef __riscv + +#if __riscv_xlen == 64 +# define MSTATUS_SD MSTATUS64_SD +# define SSTATUS_SD SSTATUS64_SD +# define RISCV_PGLEVEL_BITS 9 +# define SATP_MODE SATP64_MODE +#else +# define MSTATUS_SD MSTATUS32_SD +# define SSTATUS_SD SSTATUS32_SD +# define RISCV_PGLEVEL_BITS 10 +# define SATP_MODE SATP32_MODE +#endif +#define RISCV_PGSHIFT 12 +#define RISCV_PGSIZE (1 << RISCV_PGSHIFT) + +#ifndef __ASSEMBLER__ + +#ifdef __GNUC__ + +#define read_csr(reg) ({ unsigned long __tmp; \ + asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \ + __tmp; }) + +#define write_csr(reg, val) ({ \ + asm volatile ("csrw " #reg ", %0" :: "rK"(val)); }) + +#define swap_csr(reg, val) ({ unsigned long __tmp; \ + asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "rK"(val)); \ + __tmp; }) + +#define set_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define clear_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define rdtime() read_csr(time) +#define rdcycle() read_csr(cycle) +#define rdinstret() read_csr(instret) + +#endif + +#endif + +#endif + +#endif +/* Automatically generated by parse-opcodes. 
*/ +#ifndef RISCV_ENCODING_H +#define RISCV_ENCODING_H +#define MATCH_BEQ 0x63 +#define MASK_BEQ 0x707f +#define MATCH_BNE 0x1063 +#define MASK_BNE 0x707f +#define MATCH_BLT 0x4063 +#define MASK_BLT 0x707f +#define MATCH_BGE 0x5063 +#define MASK_BGE 0x707f +#define MATCH_BLTU 0x6063 +#define MASK_BLTU 0x707f +#define MATCH_BGEU 0x7063 +#define MASK_BGEU 0x707f +#define MATCH_JALR 0x67 +#define MASK_JALR 0x707f +#define MATCH_JAL 0x6f +#define MASK_JAL 0x7f +#define MATCH_LUI 0x37 +#define MASK_LUI 0x7f +#define MATCH_AUIPC 0x17 +#define MASK_AUIPC 0x7f +#define MATCH_ADDI 0x13 +#define MASK_ADDI 0x707f +#define MATCH_SLLI 0x1013 +#define MASK_SLLI 0xfc00707f +#define MATCH_SLTI 0x2013 +#define MASK_SLTI 0x707f +#define MATCH_SLTIU 0x3013 +#define MASK_SLTIU 0x707f +#define MATCH_XORI 0x4013 +#define MASK_XORI 0x707f +#define MATCH_SRLI 0x5013 +#define MASK_SRLI 0xfc00707f +#define MATCH_SRAI 0x40005013 +#define MASK_SRAI 0xfc00707f +#define MATCH_ORI 0x6013 +#define MASK_ORI 0x707f +#define MATCH_ANDI 0x7013 +#define MASK_ANDI 0x707f +#define MATCH_ADD 0x33 +#define MASK_ADD 0xfe00707f +#define MATCH_SUB 0x40000033 +#define MASK_SUB 0xfe00707f +#define MATCH_SLL 0x1033 +#define MASK_SLL 0xfe00707f +#define MATCH_SLT 0x2033 +#define MASK_SLT 0xfe00707f +#define MATCH_SLTU 0x3033 +#define MASK_SLTU 0xfe00707f +#define MATCH_XOR 0x4033 +#define MASK_XOR 0xfe00707f +#define MATCH_SRL 0x5033 +#define MASK_SRL 0xfe00707f +#define MATCH_SRA 0x40005033 +#define MASK_SRA 0xfe00707f +#define MATCH_OR 0x6033 +#define MASK_OR 0xfe00707f +#define MATCH_AND 0x7033 +#define MASK_AND 0xfe00707f +#define MATCH_ADDIW 0x1b +#define MASK_ADDIW 0x707f +#define MATCH_SLLIW 0x101b +#define MASK_SLLIW 0xfe00707f +#define MATCH_SRLIW 0x501b +#define MASK_SRLIW 0xfe00707f +#define MATCH_SRAIW 0x4000501b +#define MASK_SRAIW 0xfe00707f +#define MATCH_ADDW 0x3b +#define MASK_ADDW 0xfe00707f +#define MATCH_SUBW 0x4000003b +#define MASK_SUBW 0xfe00707f +#define MATCH_SLLW 0x103b +#define MASK_SLLW 0xfe00707f +#define MATCH_SRLW 0x503b +#define MASK_SRLW 0xfe00707f +#define MATCH_SRAW 0x4000503b +#define MASK_SRAW 0xfe00707f +#define MATCH_LB 0x3 +#define MASK_LB 0x707f +#define MATCH_LH 0x1003 +#define MASK_LH 0x707f +#define MATCH_LW 0x2003 +#define MASK_LW 0x707f +#define MATCH_LD 0x3003 +#define MASK_LD 0x707f +#define MATCH_LBU 0x4003 +#define MASK_LBU 0x707f +#define MATCH_LHU 0x5003 +#define MASK_LHU 0x707f +#define MATCH_LWU 0x6003 +#define MASK_LWU 0x707f +#define MATCH_SB 0x23 +#define MASK_SB 0x707f +#define MATCH_SH 0x1023 +#define MASK_SH 0x707f +#define MATCH_SW 0x2023 +#define MASK_SW 0x707f +#define MATCH_SD 0x3023 +#define MASK_SD 0x707f +#define MATCH_FENCE 0xf +#define MASK_FENCE 0x707f +#define MATCH_FENCE_I 0x100f +#define MASK_FENCE_I 0x707f +#define MATCH_MUL 0x2000033 +#define MASK_MUL 0xfe00707f +#define MATCH_MULH 0x2001033 +#define MASK_MULH 0xfe00707f +#define MATCH_MULHSU 0x2002033 +#define MASK_MULHSU 0xfe00707f +#define MATCH_MULHU 0x2003033 +#define MASK_MULHU 0xfe00707f +#define MATCH_DIV 0x2004033 +#define MASK_DIV 0xfe00707f +#define MATCH_DIVU 0x2005033 +#define MASK_DIVU 0xfe00707f +#define MATCH_REM 0x2006033 +#define MASK_REM 0xfe00707f +#define MATCH_REMU 0x2007033 +#define MASK_REMU 0xfe00707f +#define MATCH_MULW 0x200003b +#define MASK_MULW 0xfe00707f +#define MATCH_DIVW 0x200403b +#define MASK_DIVW 0xfe00707f +#define MATCH_DIVUW 0x200503b +#define MASK_DIVUW 0xfe00707f +#define MATCH_REMW 0x200603b +#define MASK_REMW 0xfe00707f +#define MATCH_REMUW 0x200703b +#define MASK_REMUW 
0xfe00707f +#define MATCH_AMOADD_W 0x202f +#define MASK_AMOADD_W 0xf800707f +#define MATCH_AMOXOR_W 0x2000202f +#define MASK_AMOXOR_W 0xf800707f +#define MATCH_AMOOR_W 0x4000202f +#define MASK_AMOOR_W 0xf800707f +#define MATCH_AMOAND_W 0x6000202f +#define MASK_AMOAND_W 0xf800707f +#define MATCH_AMOMIN_W 0x8000202f +#define MASK_AMOMIN_W 0xf800707f +#define MATCH_AMOMAX_W 0xa000202f +#define MASK_AMOMAX_W 0xf800707f +#define MATCH_AMOMINU_W 0xc000202f +#define MASK_AMOMINU_W 0xf800707f +#define MATCH_AMOMAXU_W 0xe000202f +#define MASK_AMOMAXU_W 0xf800707f +#define MATCH_AMOSWAP_W 0x800202f +#define MASK_AMOSWAP_W 0xf800707f +#define MATCH_LR_W 0x1000202f +#define MASK_LR_W 0xf9f0707f +#define MATCH_SC_W 0x1800202f +#define MASK_SC_W 0xf800707f +#define MATCH_AMOADD_D 0x302f +#define MASK_AMOADD_D 0xf800707f +#define MATCH_AMOXOR_D 0x2000302f +#define MASK_AMOXOR_D 0xf800707f +#define MATCH_AMOOR_D 0x4000302f +#define MASK_AMOOR_D 0xf800707f +#define MATCH_AMOAND_D 0x6000302f +#define MASK_AMOAND_D 0xf800707f +#define MATCH_AMOMIN_D 0x8000302f +#define MASK_AMOMIN_D 0xf800707f +#define MATCH_AMOMAX_D 0xa000302f +#define MASK_AMOMAX_D 0xf800707f +#define MATCH_AMOMINU_D 0xc000302f +#define MASK_AMOMINU_D 0xf800707f +#define MATCH_AMOMAXU_D 0xe000302f +#define MASK_AMOMAXU_D 0xf800707f +#define MATCH_AMOSWAP_D 0x800302f +#define MASK_AMOSWAP_D 0xf800707f +#define MATCH_LR_D 0x1000302f +#define MASK_LR_D 0xf9f0707f +#define MATCH_SC_D 0x1800302f +#define MASK_SC_D 0xf800707f +#define MATCH_ECALL 0x73 +#define MASK_ECALL 0xffffffff +#define MATCH_EBREAK 0x100073 +#define MASK_EBREAK 0xffffffff +#define MATCH_URET 0x200073 +#define MASK_URET 0xffffffff +#define MATCH_SRET 0x10200073 +#define MASK_SRET 0xffffffff +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff +#define MATCH_DRET 0x7b200073 +#define MASK_DRET 0xffffffff +#define MATCH_SFENCE_VMA 0x12000073 +#define MASK_SFENCE_VMA 0xfe007fff +#define MATCH_WFI 0x10500073 +#define MASK_WFI 0xffffffff +#define MATCH_CSRRW 0x1073 +#define MASK_CSRRW 0x707f +#define MATCH_CSRRS 0x2073 +#define MASK_CSRRS 0x707f +#define MATCH_CSRRC 0x3073 +#define MASK_CSRRC 0x707f +#define MATCH_CSRRWI 0x5073 +#define MASK_CSRRWI 0x707f +#define MATCH_CSRRSI 0x6073 +#define MASK_CSRRSI 0x707f +#define MATCH_CSRRCI 0x7073 +#define MASK_CSRRCI 0x707f +#define MATCH_FADD_S 0x53 +#define MASK_FADD_S 0xfe00007f +#define MATCH_FSUB_S 0x8000053 +#define MASK_FSUB_S 0xfe00007f +#define MATCH_FMUL_S 0x10000053 +#define MASK_FMUL_S 0xfe00007f +#define MATCH_FDIV_S 0x18000053 +#define MASK_FDIV_S 0xfe00007f +#define MATCH_FSGNJ_S 0x20000053 +#define MASK_FSGNJ_S 0xfe00707f +#define MATCH_FSGNJN_S 0x20001053 +#define MASK_FSGNJN_S 0xfe00707f +#define MATCH_FSGNJX_S 0x20002053 +#define MASK_FSGNJX_S 0xfe00707f +#define MATCH_FMIN_S 0x28000053 +#define MASK_FMIN_S 0xfe00707f +#define MATCH_FMAX_S 0x28001053 +#define MASK_FMAX_S 0xfe00707f +#define MATCH_FSQRT_S 0x58000053 +#define MASK_FSQRT_S 0xfff0007f +#define MATCH_FADD_D 0x2000053 +#define MASK_FADD_D 0xfe00007f +#define MATCH_FSUB_D 0xa000053 +#define MASK_FSUB_D 0xfe00007f +#define MATCH_FMUL_D 0x12000053 +#define MASK_FMUL_D 0xfe00007f +#define MATCH_FDIV_D 0x1a000053 +#define MASK_FDIV_D 0xfe00007f +#define MATCH_FSGNJ_D 0x22000053 +#define MASK_FSGNJ_D 0xfe00707f +#define MATCH_FSGNJN_D 0x22001053 +#define MASK_FSGNJN_D 0xfe00707f +#define MATCH_FSGNJX_D 0x22002053 +#define MASK_FSGNJX_D 0xfe00707f +#define MATCH_FMIN_D 0x2a000053 +#define MASK_FMIN_D 0xfe00707f +#define MATCH_FMAX_D 0x2a001053 +#define 
MASK_FMAX_D 0xfe00707f +#define MATCH_FCVT_S_D 0x40100053 +#define MASK_FCVT_S_D 0xfff0007f +#define MATCH_FCVT_D_S 0x42000053 +#define MASK_FCVT_D_S 0xfff0007f +#define MATCH_FSQRT_D 0x5a000053 +#define MASK_FSQRT_D 0xfff0007f +#define MATCH_FADD_Q 0x6000053 +#define MASK_FADD_Q 0xfe00007f +#define MATCH_FSUB_Q 0xe000053 +#define MASK_FSUB_Q 0xfe00007f +#define MATCH_FMUL_Q 0x16000053 +#define MASK_FMUL_Q 0xfe00007f +#define MATCH_FDIV_Q 0x1e000053 +#define MASK_FDIV_Q 0xfe00007f +#define MATCH_FSGNJ_Q 0x26000053 +#define MASK_FSGNJ_Q 0xfe00707f +#define MATCH_FSGNJN_Q 0x26001053 +#define MASK_FSGNJN_Q 0xfe00707f +#define MATCH_FSGNJX_Q 0x26002053 +#define MASK_FSGNJX_Q 0xfe00707f +#define MATCH_FMIN_Q 0x2e000053 +#define MASK_FMIN_Q 0xfe00707f +#define MATCH_FMAX_Q 0x2e001053 +#define MASK_FMAX_Q 0xfe00707f +#define MATCH_FCVT_S_Q 0x40300053 +#define MASK_FCVT_S_Q 0xfff0007f +#define MATCH_FCVT_Q_S 0x46000053 +#define MASK_FCVT_Q_S 0xfff0007f +#define MATCH_FCVT_D_Q 0x42300053 +#define MASK_FCVT_D_Q 0xfff0007f +#define MATCH_FCVT_Q_D 0x46100053 +#define MASK_FCVT_Q_D 0xfff0007f +#define MATCH_FSQRT_Q 0x5e000053 +#define MASK_FSQRT_Q 0xfff0007f +#define MATCH_FLE_S 0xa0000053 +#define MASK_FLE_S 0xfe00707f +#define MATCH_FLT_S 0xa0001053 +#define MASK_FLT_S 0xfe00707f +#define MATCH_FEQ_S 0xa0002053 +#define MASK_FEQ_S 0xfe00707f +#define MATCH_FLE_D 0xa2000053 +#define MASK_FLE_D 0xfe00707f +#define MATCH_FLT_D 0xa2001053 +#define MASK_FLT_D 0xfe00707f +#define MATCH_FEQ_D 0xa2002053 +#define MASK_FEQ_D 0xfe00707f +#define MATCH_FLE_Q 0xa6000053 +#define MASK_FLE_Q 0xfe00707f +#define MATCH_FLT_Q 0xa6001053 +#define MASK_FLT_Q 0xfe00707f +#define MATCH_FEQ_Q 0xa6002053 +#define MASK_FEQ_Q 0xfe00707f +#define MATCH_FCVT_W_S 0xc0000053 +#define MASK_FCVT_W_S 0xfff0007f +#define MATCH_FCVT_WU_S 0xc0100053 +#define MASK_FCVT_WU_S 0xfff0007f +#define MATCH_FCVT_L_S 0xc0200053 +#define MASK_FCVT_L_S 0xfff0007f +#define MATCH_FCVT_LU_S 0xc0300053 +#define MASK_FCVT_LU_S 0xfff0007f +#define MATCH_FMV_X_W 0xe0000053 +#define MASK_FMV_X_W 0xfff0707f +#define MATCH_FCLASS_S 0xe0001053 +#define MASK_FCLASS_S 0xfff0707f +#define MATCH_FCVT_W_D 0xc2000053 +#define MASK_FCVT_W_D 0xfff0007f +#define MATCH_FCVT_WU_D 0xc2100053 +#define MASK_FCVT_WU_D 0xfff0007f +#define MATCH_FCVT_L_D 0xc2200053 +#define MASK_FCVT_L_D 0xfff0007f +#define MATCH_FCVT_LU_D 0xc2300053 +#define MASK_FCVT_LU_D 0xfff0007f +#define MATCH_FMV_X_D 0xe2000053 +#define MASK_FMV_X_D 0xfff0707f +#define MATCH_FCLASS_D 0xe2001053 +#define MASK_FCLASS_D 0xfff0707f +#define MATCH_FCVT_W_Q 0xc6000053 +#define MASK_FCVT_W_Q 0xfff0007f +#define MATCH_FCVT_WU_Q 0xc6100053 +#define MASK_FCVT_WU_Q 0xfff0007f +#define MATCH_FCVT_L_Q 0xc6200053 +#define MASK_FCVT_L_Q 0xfff0007f +#define MATCH_FCVT_LU_Q 0xc6300053 +#define MASK_FCVT_LU_Q 0xfff0007f +#define MATCH_FMV_X_Q 0xe6000053 +#define MASK_FMV_X_Q 0xfff0707f +#define MATCH_FCLASS_Q 0xe6001053 +#define MASK_FCLASS_Q 0xfff0707f +#define MATCH_FCVT_S_W 0xd0000053 +#define MASK_FCVT_S_W 0xfff0007f +#define MATCH_FCVT_S_WU 0xd0100053 +#define MASK_FCVT_S_WU 0xfff0007f +#define MATCH_FCVT_S_L 0xd0200053 +#define MASK_FCVT_S_L 0xfff0007f +#define MATCH_FCVT_S_LU 0xd0300053 +#define MASK_FCVT_S_LU 0xfff0007f +#define MATCH_FMV_W_X 0xf0000053 +#define MASK_FMV_W_X 0xfff0707f +#define MATCH_FCVT_D_W 0xd2000053 +#define MASK_FCVT_D_W 0xfff0007f +#define MATCH_FCVT_D_WU 0xd2100053 +#define MASK_FCVT_D_WU 0xfff0007f +#define MATCH_FCVT_D_L 0xd2200053 +#define MASK_FCVT_D_L 0xfff0007f +#define 
MATCH_FCVT_D_LU 0xd2300053 +#define MASK_FCVT_D_LU 0xfff0007f +#define MATCH_FMV_D_X 0xf2000053 +#define MASK_FMV_D_X 0xfff0707f +#define MATCH_FCVT_Q_W 0xd6000053 +#define MASK_FCVT_Q_W 0xfff0007f +#define MATCH_FCVT_Q_WU 0xd6100053 +#define MASK_FCVT_Q_WU 0xfff0007f +#define MATCH_FCVT_Q_L 0xd6200053 +#define MASK_FCVT_Q_L 0xfff0007f +#define MATCH_FCVT_Q_LU 0xd6300053 +#define MASK_FCVT_Q_LU 0xfff0007f +#define MATCH_FMV_Q_X 0xf6000053 +#define MASK_FMV_Q_X 0xfff0707f +#define MATCH_FLW 0x2007 +#define MASK_FLW 0x707f +#define MATCH_FLD 0x3007 +#define MASK_FLD 0x707f +#define MATCH_FLQ 0x4007 +#define MASK_FLQ 0x707f +#define MATCH_FSW 0x2027 +#define MASK_FSW 0x707f +#define MATCH_FSD 0x3027 +#define MASK_FSD 0x707f +#define MATCH_FSQ 0x4027 +#define MASK_FSQ 0x707f +#define MATCH_FMADD_S 0x43 +#define MASK_FMADD_S 0x600007f +#define MATCH_FMSUB_S 0x47 +#define MASK_FMSUB_S 0x600007f +#define MATCH_FNMSUB_S 0x4b +#define MASK_FNMSUB_S 0x600007f +#define MATCH_FNMADD_S 0x4f +#define MASK_FNMADD_S 0x600007f +#define MATCH_FMADD_D 0x2000043 +#define MASK_FMADD_D 0x600007f +#define MATCH_FMSUB_D 0x2000047 +#define MASK_FMSUB_D 0x600007f +#define MATCH_FNMSUB_D 0x200004b +#define MASK_FNMSUB_D 0x600007f +#define MATCH_FNMADD_D 0x200004f +#define MASK_FNMADD_D 0x600007f +#define MATCH_FMADD_Q 0x6000043 +#define MASK_FMADD_Q 0x600007f +#define MATCH_FMSUB_Q 0x6000047 +#define MASK_FMSUB_Q 0x600007f +#define MATCH_FNMSUB_Q 0x600004b +#define MASK_FNMSUB_Q 0x600007f +#define MATCH_FNMADD_Q 0x600004f +#define MASK_FNMADD_Q 0x600007f +#define MATCH_C_NOP 0x1 +#define MASK_C_NOP 0xffff +#define MATCH_C_ADDI16SP 0x6101 +#define MASK_C_ADDI16SP 0xef83 +#define MATCH_C_JR 0x8002 +#define MASK_C_JR 0xf07f +#define MATCH_C_JALR 0x9002 +#define MASK_C_JALR 0xf07f +#define MATCH_C_EBREAK 0x9002 +#define MASK_C_EBREAK 0xffff +#define MATCH_C_LD 0x6000 +#define MASK_C_LD 0xe003 +#define MATCH_C_SD 0xe000 +#define MASK_C_SD 0xe003 +#define MATCH_C_ADDIW 0x2001 +#define MASK_C_ADDIW 0xe003 +#define MATCH_C_LDSP 0x6002 +#define MASK_C_LDSP 0xe003 +#define MATCH_C_SDSP 0xe002 +#define MASK_C_SDSP 0xe003 +#define MATCH_C_ADDI4SPN 0x0 +#define MASK_C_ADDI4SPN 0xe003 +#define MATCH_C_FLD 0x2000 +#define MASK_C_FLD 0xe003 +#define MATCH_C_LW 0x4000 +#define MASK_C_LW 0xe003 +#define MATCH_C_FLW 0x6000 +#define MASK_C_FLW 0xe003 +#define MATCH_C_FSD 0xa000 +#define MASK_C_FSD 0xe003 +#define MATCH_C_SW 0xc000 +#define MASK_C_SW 0xe003 +#define MATCH_C_FSW 0xe000 +#define MASK_C_FSW 0xe003 +#define MATCH_C_ADDI 0x1 +#define MASK_C_ADDI 0xe003 +#define MATCH_C_JAL 0x2001 +#define MASK_C_JAL 0xe003 +#define MATCH_C_LI 0x4001 +#define MASK_C_LI 0xe003 +#define MATCH_C_LUI 0x6001 +#define MASK_C_LUI 0xe003 +#define MATCH_C_SRLI 0x8001 +#define MASK_C_SRLI 0xec03 +#define MATCH_C_SRAI 0x8401 +#define MASK_C_SRAI 0xec03 +#define MATCH_C_ANDI 0x8801 +#define MASK_C_ANDI 0xec03 +#define MATCH_C_SUB 0x8c01 +#define MASK_C_SUB 0xfc63 +#define MATCH_C_XOR 0x8c21 +#define MASK_C_XOR 0xfc63 +#define MATCH_C_OR 0x8c41 +#define MASK_C_OR 0xfc63 +#define MATCH_C_AND 0x8c61 +#define MASK_C_AND 0xfc63 +#define MATCH_C_SUBW 0x9c01 +#define MASK_C_SUBW 0xfc63 +#define MATCH_C_ADDW 0x9c21 +#define MASK_C_ADDW 0xfc63 +#define MATCH_C_J 0xa001 +#define MASK_C_J 0xe003 +#define MATCH_C_BEQZ 0xc001 +#define MASK_C_BEQZ 0xe003 +#define MATCH_C_BNEZ 0xe001 +#define MASK_C_BNEZ 0xe003 +#define MATCH_C_SLLI 0x2 +#define MASK_C_SLLI 0xe003 +#define MATCH_C_FLDSP 0x2002 +#define MASK_C_FLDSP 0xe003 +#define MATCH_C_LWSP 0x4002 +#define 
MASK_C_LWSP 0xe003 +#define MATCH_C_FLWSP 0x6002 +#define MASK_C_FLWSP 0xe003 +#define MATCH_C_MV 0x8002 +#define MASK_C_MV 0xf003 +#define MATCH_C_ADD 0x9002 +#define MASK_C_ADD 0xf003 +#define MATCH_C_FSDSP 0xa002 +#define MASK_C_FSDSP 0xe003 +#define MATCH_C_SWSP 0xc002 +#define MASK_C_SWSP 0xe003 +#define MATCH_C_FSWSP 0xe002 +#define MASK_C_FSWSP 0xe003 +#define MATCH_CUSTOM0 0xb +#define MASK_CUSTOM0 0x707f +#define MATCH_CUSTOM0_RS1 0x200b +#define MASK_CUSTOM0_RS1 0x707f +#define MATCH_CUSTOM0_RS1_RS2 0x300b +#define MASK_CUSTOM0_RS1_RS2 0x707f +#define MATCH_CUSTOM0_RD 0x400b +#define MASK_CUSTOM0_RD 0x707f +#define MATCH_CUSTOM0_RD_RS1 0x600b +#define MASK_CUSTOM0_RD_RS1 0x707f +#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b +#define MASK_CUSTOM0_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM1 0x2b +#define MASK_CUSTOM1 0x707f +#define MATCH_CUSTOM1_RS1 0x202b +#define MASK_CUSTOM1_RS1 0x707f +#define MATCH_CUSTOM1_RS1_RS2 0x302b +#define MASK_CUSTOM1_RS1_RS2 0x707f +#define MATCH_CUSTOM1_RD 0x402b +#define MASK_CUSTOM1_RD 0x707f +#define MATCH_CUSTOM1_RD_RS1 0x602b +#define MASK_CUSTOM1_RD_RS1 0x707f +#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b +#define MASK_CUSTOM1_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM2 0x5b +#define MASK_CUSTOM2 0x707f +#define MATCH_CUSTOM2_RS1 0x205b +#define MASK_CUSTOM2_RS1 0x707f +#define MATCH_CUSTOM2_RS1_RS2 0x305b +#define MASK_CUSTOM2_RS1_RS2 0x707f +#define MATCH_CUSTOM2_RD 0x405b +#define MASK_CUSTOM2_RD 0x707f +#define MATCH_CUSTOM2_RD_RS1 0x605b +#define MASK_CUSTOM2_RD_RS1 0x707f +#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b +#define MASK_CUSTOM2_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM3 0x7b +#define MASK_CUSTOM3 0x707f +#define MATCH_CUSTOM3_RS1 0x207b +#define MASK_CUSTOM3_RS1 0x707f +#define MATCH_CUSTOM3_RS1_RS2 0x307b +#define MASK_CUSTOM3_RS1_RS2 0x707f +#define MATCH_CUSTOM3_RD 0x407b +#define MASK_CUSTOM3_RD 0x707f +#define MATCH_CUSTOM3_RD_RS1 0x607b +#define MASK_CUSTOM3_RD_RS1 0x707f +#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b +#define MASK_CUSTOM3_RD_RS1_RS2 0x707f +#define CSR_FFLAGS 0x1 +#define CSR_FRM 0x2 +#define CSR_FCSR 0x3 +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_SSTATUS 0x100 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_SATP 0x180 +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MEDELEG 0x302 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MCOUNTEREN 0x306 +#define 
CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPCFG1 0x3a1 +#define CSR_PMPCFG2 0x3a2 +#define CSR_PMPCFG3 0x3a3 +#define CSR_PMPADDR0 0x3b0 +#define CSR_PMPADDR1 0x3b1 +#define CSR_PMPADDR2 0x3b2 +#define CSR_PMPADDR3 0x3b3 +#define CSR_PMPADDR4 0x3b4 +#define CSR_PMPADDR5 0x3b5 +#define CSR_PMPADDR6 0x3b6 +#define CSR_PMPADDR7 0x3b7 +#define CSR_PMPADDR8 0x3b8 +#define CSR_PMPADDR9 0x3b9 +#define CSR_PMPADDR10 0x3ba +#define CSR_PMPADDR11 0x3bb +#define CSR_PMPADDR12 0x3bc +#define CSR_PMPADDR13 0x3bd +#define CSR_PMPADDR14 0x3be +#define CSR_PMPADDR15 0x3bf +#define CSR_TSELECT 0x7a0 +#define CSR_TDATA1 0x7a1 +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA3 0x7a3 +#define CSR_DCSR 0x7b0 +#define CSR_DPC 0x7b1 +#define CSR_DSCRATCH 0x7b2 +#define CSR_MCYCLE 0xb00 +#define CSR_MINSTRET 0xb02 +#define CSR_MHPMCOUNTER3 0xb03 +#define CSR_MHPMCOUNTER4 0xb04 +#define CSR_MHPMCOUNTER5 0xb05 +#define CSR_MHPMCOUNTER6 0xb06 +#define CSR_MHPMCOUNTER7 0xb07 +#define CSR_MHPMCOUNTER8 0xb08 +#define CSR_MHPMCOUNTER9 0xb09 +#define CSR_MHPMCOUNTER10 0xb0a +#define CSR_MHPMCOUNTER11 0xb0b +#define CSR_MHPMCOUNTER12 0xb0c +#define CSR_MHPMCOUNTER13 0xb0d +#define CSR_MHPMCOUNTER14 0xb0e +#define CSR_MHPMCOUNTER15 0xb0f +#define CSR_MHPMCOUNTER16 0xb10 +#define CSR_MHPMCOUNTER17 0xb11 +#define CSR_MHPMCOUNTER18 0xb12 +#define CSR_MHPMCOUNTER19 0xb13 +#define CSR_MHPMCOUNTER20 0xb14 +#define CSR_MHPMCOUNTER21 0xb15 +#define CSR_MHPMCOUNTER22 0xb16 +#define CSR_MHPMCOUNTER23 0xb17 +#define CSR_MHPMCOUNTER24 0xb18 +#define CSR_MHPMCOUNTER25 0xb19 +#define CSR_MHPMCOUNTER26 0xb1a +#define CSR_MHPMCOUNTER27 0xb1b +#define CSR_MHPMCOUNTER28 0xb1c +#define CSR_MHPMCOUNTER29 0xb1d +#define CSR_MHPMCOUNTER30 0xb1e +#define CSR_MHPMCOUNTER31 0xb1f +#define CSR_MHPMEVENT3 0x323 +#define CSR_MHPMEVENT4 0x324 +#define CSR_MHPMEVENT5 0x325 +#define CSR_MHPMEVENT6 0x326 +#define CSR_MHPMEVENT7 0x327 +#define CSR_MHPMEVENT8 0x328 +#define CSR_MHPMEVENT9 0x329 +#define CSR_MHPMEVENT10 0x32a +#define CSR_MHPMEVENT11 0x32b +#define CSR_MHPMEVENT12 0x32c +#define CSR_MHPMEVENT13 0x32d +#define CSR_MHPMEVENT14 0x32e +#define CSR_MHPMEVENT15 0x32f +#define CSR_MHPMEVENT16 0x330 +#define CSR_MHPMEVENT17 0x331 +#define CSR_MHPMEVENT18 0x332 +#define CSR_MHPMEVENT19 0x333 +#define CSR_MHPMEVENT20 0x334 +#define CSR_MHPMEVENT21 0x335 +#define CSR_MHPMEVENT22 0x336 +#define CSR_MHPMEVENT23 0x337 +#define CSR_MHPMEVENT24 0x338 +#define CSR_MHPMEVENT25 0x339 +#define CSR_MHPMEVENT26 0x33a +#define CSR_MHPMEVENT27 0x33b +#define CSR_MHPMEVENT28 0x33c +#define CSR_MHPMEVENT29 0x33d +#define CSR_MHPMEVENT30 0x33e +#define CSR_MHPMEVENT31 0x33f +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define 
CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f +#define CSR_MCYCLEH 0xb80 +#define CSR_MINSTRETH 0xb82 +#define CSR_MHPMCOUNTER3H 0xb83 +#define CSR_MHPMCOUNTER4H 0xb84 +#define CSR_MHPMCOUNTER5H 0xb85 +#define CSR_MHPMCOUNTER6H 0xb86 +#define CSR_MHPMCOUNTER7H 0xb87 +#define CSR_MHPMCOUNTER8H 0xb88 +#define CSR_MHPMCOUNTER9H 0xb89 +#define CSR_MHPMCOUNTER10H 0xb8a +#define CSR_MHPMCOUNTER11H 0xb8b +#define CSR_MHPMCOUNTER12H 0xb8c +#define CSR_MHPMCOUNTER13H 0xb8d +#define CSR_MHPMCOUNTER14H 0xb8e +#define CSR_MHPMCOUNTER15H 0xb8f +#define CSR_MHPMCOUNTER16H 0xb90 +#define CSR_MHPMCOUNTER17H 0xb91 +#define CSR_MHPMCOUNTER18H 0xb92 +#define CSR_MHPMCOUNTER19H 0xb93 +#define CSR_MHPMCOUNTER20H 0xb94 +#define CSR_MHPMCOUNTER21H 0xb95 +#define CSR_MHPMCOUNTER22H 0xb96 +#define CSR_MHPMCOUNTER23H 0xb97 +#define CSR_MHPMCOUNTER24H 0xb98 +#define CSR_MHPMCOUNTER25H 0xb99 +#define CSR_MHPMCOUNTER26H 0xb9a +#define CSR_MHPMCOUNTER27H 0xb9b +#define CSR_MHPMCOUNTER28H 0xb9c +#define CSR_MHPMCOUNTER29H 0xb9d +#define CSR_MHPMCOUNTER30H 0xb9e +#define CSR_MHPMCOUNTER31H 0xb9f +#define CAUSE_MISALIGNED_FETCH 0x0 +#define CAUSE_FETCH_ACCESS 0x1 +#define CAUSE_ILLEGAL_INSTRUCTION 0x2 +#define CAUSE_BREAKPOINT 0x3 +#define CAUSE_MISALIGNED_LOAD 0x4 +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_MISALIGNED_STORE 0x6 +#define CAUSE_STORE_ACCESS 0x7 +#define CAUSE_USER_ECALL 0x8 +#define CAUSE_SUPERVISOR_ECALL 0x9 +#define CAUSE_HYPERVISOR_ECALL 0xa +#define CAUSE_MACHINE_ECALL 0xb +#define CAUSE_FETCH_PAGE_FAULT 0xc +#define CAUSE_LOAD_PAGE_FAULT 0xd +#define CAUSE_STORE_PAGE_FAULT 0xf +#endif +#ifdef DECLARE_INSN +DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ) +DECLARE_INSN(bne, MATCH_BNE, MASK_BNE) +DECLARE_INSN(blt, MATCH_BLT, MASK_BLT) +DECLARE_INSN(bge, MATCH_BGE, MASK_BGE) +DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU) +DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU) +DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR) +DECLARE_INSN(jal, MATCH_JAL, MASK_JAL) +DECLARE_INSN(lui, MATCH_LUI, MASK_LUI) +DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC) +DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI) +DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI) +DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI) +DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU) +DECLARE_INSN(xori, MATCH_XORI, MASK_XORI) +DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI) +DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI) +DECLARE_INSN(ori, MATCH_ORI, MASK_ORI) +DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI) +DECLARE_INSN(add, MATCH_ADD, MASK_ADD) +DECLARE_INSN(sub, MATCH_SUB, MASK_SUB) +DECLARE_INSN(sll, MATCH_SLL, MASK_SLL) +DECLARE_INSN(slt, MATCH_SLT, MASK_SLT) +DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU) +DECLARE_INSN(xor, MATCH_XOR, MASK_XOR) +DECLARE_INSN(srl, MATCH_SRL, MASK_SRL) +DECLARE_INSN(sra, MATCH_SRA, MASK_SRA) +DECLARE_INSN(or, MATCH_OR, MASK_OR) +DECLARE_INSN(and, MATCH_AND, MASK_AND) +DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW) +DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW) +DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW) +DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW) +DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW) +DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW) +DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW) +DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW) +DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW) +DECLARE_INSN(lb, 
MATCH_LB, MASK_LB) +DECLARE_INSN(lh, MATCH_LH, MASK_LH) +DECLARE_INSN(lw, MATCH_LW, MASK_LW) +DECLARE_INSN(ld, MATCH_LD, MASK_LD) +DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU) +DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU) +DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU) +DECLARE_INSN(sb, MATCH_SB, MASK_SB) +DECLARE_INSN(sh, MATCH_SH, MASK_SH) +DECLARE_INSN(sw, MATCH_SW, MASK_SW) +DECLARE_INSN(sd, MATCH_SD, MASK_SD) +DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE) +DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I) +DECLARE_INSN(mul, MATCH_MUL, MASK_MUL) +DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH) +DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU) +DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU) +DECLARE_INSN(div, MATCH_DIV, MASK_DIV) +DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU) +DECLARE_INSN(rem, MATCH_REM, MASK_REM) +DECLARE_INSN(remu, MATCH_REMU, MASK_REMU) +DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW) +DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW) +DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW) +DECLARE_INSN(remw, MATCH_REMW, MASK_REMW) +DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW) +DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W) +DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W) +DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W) +DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W) +DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W) +DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W) +DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W) +DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W) +DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W) +DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W) +DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W) +DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D) +DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D) +DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D) +DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D) +DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D) +DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D) +DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D) +DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D) +DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D) +DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D) +DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D) +DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL) +DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK) +DECLARE_INSN(uret, MATCH_URET, MASK_URET) +DECLARE_INSN(sret, MATCH_SRET, MASK_SRET) +DECLARE_INSN(mret, MATCH_MRET, MASK_MRET) +DECLARE_INSN(dret, MATCH_DRET, MASK_DRET) +DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA) +DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI) +DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW) +DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS) +DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC) +DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI) +DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI) +DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI) +DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S) +DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S) +DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S) +DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S) +DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S) +DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S) +DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S) +DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S) +DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S) +DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S) +DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D) +DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D) +DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D) 
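+/*
+ * How this X-macro list is consumed (a sketch, not part of the vendored
+ * header): a client defines DECLARE_INSN before including encoding.h, and
+ * each entry then expands into whatever per-instruction code the client
+ * needs, e.g.
+ *
+ *   #define DECLARE_INSN(name, match, mask) \
+ *     static const uint64_t name##_match = (match), name##_mask = (mask);
+ *   #include "encoding.h"
+ *   #undef DECLARE_INSN
+ *
+ * An instruction word insn decodes as entry x exactly when
+ * (insn & MASK_x) == MATCH_x.
+ */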
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D) +DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D) +DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D) +DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D) +DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D) +DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D) +DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D) +DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S) +DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D) +DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q) +DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q) +DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q) +DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q) +DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q) +DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q) +DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q) +DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q) +DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q) +DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q) +DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S) +DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q) +DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D) +DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q) +DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S) +DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S) +DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S) +DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D) +DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D) +DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D) +DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q) +DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q) +DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q) +DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S) +DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S) +DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S) +DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S) +DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W) +DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S) +DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D) +DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D) +DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D) +DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D) +DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D) +DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D) +DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q) +DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q) +DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q) +DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q) +DECLARE_INSN(fmv_x_q, MATCH_FMV_X_Q, MASK_FMV_X_Q) +DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q) +DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W) +DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU) +DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L) +DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU) +DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X) +DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W) +DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU) +DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L) +DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU) +DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X) +DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W) +DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU) +DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L) +DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU) +DECLARE_INSN(fmv_q_x, MATCH_FMV_Q_X, MASK_FMV_Q_X) +DECLARE_INSN(flw, MATCH_FLW, MASK_FLW) +DECLARE_INSN(fld, MATCH_FLD, MASK_FLD) +DECLARE_INSN(flq, 
MATCH_FLQ, MASK_FLQ) +DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW) +DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD) +DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ) +DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S) +DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S) +DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S) +DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S) +DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D) +DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D) +DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D) +DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D) +DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q) +DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q) +DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q) +DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q) +DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP) +DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP) +DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR) +DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR) +DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK) +DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD) +DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD) +DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW) +DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP) +DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP) +DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN) +DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD) +DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW) +DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW) +DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD) +DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW) +DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW) +DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI) +DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL) +DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI) +DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI) +DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI) +DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI) +DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI) +DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB) +DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR) +DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR) +DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND) +DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW) +DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW) +DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J) +DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ) +DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ) +DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI) +DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP) +DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP) +DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP) +DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV) +DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD) +DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP) +DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP) +DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP) +DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0) +DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1) +DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2) +DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD) +DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1) +DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2) +DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1) +DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1) +DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2) +DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD) +DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1) 
+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2) +DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2) +DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1) +DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2) +DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD) +DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1) +DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2) +DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3) +DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1) +DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2) +DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD) +DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1) +DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2) +#endif +#ifdef DECLARE_CSR +DECLARE_CSR(fflags, CSR_FFLAGS) +DECLARE_CSR(frm, CSR_FRM) +DECLARE_CSR(fcsr, CSR_FCSR) +DECLARE_CSR(cycle, CSR_CYCLE) +DECLARE_CSR(time, CSR_TIME) +DECLARE_CSR(instret, CSR_INSTRET) +DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3) +DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4) +DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5) +DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6) +DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7) +DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8) +DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9) +DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10) +DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11) +DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12) +DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13) +DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14) +DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15) +DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16) +DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17) +DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18) +DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19) +DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20) +DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21) +DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22) +DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23) +DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24) +DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25) +DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26) +DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27) +DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28) +DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29) +DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30) +DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31) +DECLARE_CSR(sstatus, CSR_SSTATUS) +DECLARE_CSR(sie, CSR_SIE) +DECLARE_CSR(stvec, CSR_STVEC) +DECLARE_CSR(scounteren, CSR_SCOUNTEREN) +DECLARE_CSR(sscratch, CSR_SSCRATCH) +DECLARE_CSR(sepc, CSR_SEPC) +DECLARE_CSR(scause, CSR_SCAUSE) +DECLARE_CSR(stval, CSR_STVAL) +DECLARE_CSR(sip, CSR_SIP) +DECLARE_CSR(satp, CSR_SATP) +DECLARE_CSR(mstatus, CSR_MSTATUS) +DECLARE_CSR(misa, CSR_MISA) +DECLARE_CSR(medeleg, CSR_MEDELEG) +DECLARE_CSR(mideleg, CSR_MIDELEG) +DECLARE_CSR(mie, CSR_MIE) +DECLARE_CSR(mtvec, CSR_MTVEC) +DECLARE_CSR(mcounteren, CSR_MCOUNTEREN) +DECLARE_CSR(mscratch, CSR_MSCRATCH) +DECLARE_CSR(mepc, CSR_MEPC) +DECLARE_CSR(mcause, CSR_MCAUSE) +DECLARE_CSR(mtval, CSR_MTVAL) +DECLARE_CSR(mip, CSR_MIP) +DECLARE_CSR(pmpcfg0, CSR_PMPCFG0) +DECLARE_CSR(pmpcfg1, CSR_PMPCFG1) +DECLARE_CSR(pmpcfg2, CSR_PMPCFG2) +DECLARE_CSR(pmpcfg3, CSR_PMPCFG3) +DECLARE_CSR(pmpaddr0, CSR_PMPADDR0) +DECLARE_CSR(pmpaddr1, CSR_PMPADDR1) +DECLARE_CSR(pmpaddr2, CSR_PMPADDR2) +DECLARE_CSR(pmpaddr3, CSR_PMPADDR3) +DECLARE_CSR(pmpaddr4, CSR_PMPADDR4) +DECLARE_CSR(pmpaddr5, CSR_PMPADDR5) +DECLARE_CSR(pmpaddr6, CSR_PMPADDR6) +DECLARE_CSR(pmpaddr7, 
CSR_PMPADDR7) +DECLARE_CSR(pmpaddr8, CSR_PMPADDR8) +DECLARE_CSR(pmpaddr9, CSR_PMPADDR9) +DECLARE_CSR(pmpaddr10, CSR_PMPADDR10) +DECLARE_CSR(pmpaddr11, CSR_PMPADDR11) +DECLARE_CSR(pmpaddr12, CSR_PMPADDR12) +DECLARE_CSR(pmpaddr13, CSR_PMPADDR13) +DECLARE_CSR(pmpaddr14, CSR_PMPADDR14) +DECLARE_CSR(pmpaddr15, CSR_PMPADDR15) +DECLARE_CSR(tselect, CSR_TSELECT) +DECLARE_CSR(tdata1, CSR_TDATA1) +DECLARE_CSR(tdata2, CSR_TDATA2) +DECLARE_CSR(tdata3, CSR_TDATA3) +DECLARE_CSR(dcsr, CSR_DCSR) +DECLARE_CSR(dpc, CSR_DPC) +DECLARE_CSR(dscratch, CSR_DSCRATCH) +DECLARE_CSR(mcycle, CSR_MCYCLE) +DECLARE_CSR(minstret, CSR_MINSTRET) +DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3) +DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4) +DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5) +DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6) +DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7) +DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8) +DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9) +DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10) +DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11) +DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12) +DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13) +DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14) +DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15) +DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16) +DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17) +DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18) +DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19) +DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20) +DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21) +DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22) +DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23) +DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24) +DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25) +DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26) +DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27) +DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28) +DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29) +DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30) +DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31) +DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3) +DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4) +DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5) +DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6) +DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7) +DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8) +DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9) +DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10) +DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11) +DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12) +DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13) +DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14) +DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15) +DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16) +DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17) +DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18) +DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19) +DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20) +DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21) +DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22) +DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23) +DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24) +DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25) +DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26) +DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27) +DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28) +DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29) +DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30) +DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31) +DECLARE_CSR(mvendorid, CSR_MVENDORID) +DECLARE_CSR(marchid, CSR_MARCHID) +DECLARE_CSR(mimpid, CSR_MIMPID) +DECLARE_CSR(mhartid, CSR_MHARTID) +DECLARE_CSR(cycleh, CSR_CYCLEH) +DECLARE_CSR(timeh, CSR_TIMEH) +DECLARE_CSR(instreth, CSR_INSTRETH) +DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H) +DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H) 
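+/*
+ * CSR numbering note (privileged-spec convention): bits [11:10] of the CSR
+ * address encode accessibility (0b11 = read-only, as in the 0xCxx user
+ * counters) and bits [9:8] the minimum privilege level (0x1xx supervisor,
+ * 0x3xx/0xBxx machine). The *H registers in this list expose the upper
+ * 32 bits of the 64-bit counters and are accessible only on RV32.
+ */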
+DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H) +DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H) +DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H) +DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H) +DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H) +DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H) +DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H) +DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H) +DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H) +DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H) +DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H) +DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H) +DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H) +DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H) +DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H) +DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H) +DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H) +DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H) +DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H) +DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H) +DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H) +DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H) +DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H) +DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H) +DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H) +DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H) +DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H) +DECLARE_CSR(mcycleh, CSR_MCYCLEH) +DECLARE_CSR(minstreth, CSR_MINSTRETH) +DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H) +DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H) +DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H) +DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H) +DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H) +DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H) +DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H) +DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H) +DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H) +DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H) +DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H) +DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H) +DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H) +DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H) +DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H) +DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H) +DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H) +DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H) +DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H) +DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H) +DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H) +DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H) +DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H) +DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H) +DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H) +DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H) +DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H) +DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H) +DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H) +#endif +#ifdef DECLARE_CAUSE +DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH) +DECLARE_CAUSE("fetch access", CAUSE_FETCH_ACCESS) +DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION) +DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT) +DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD) +DECLARE_CAUSE("load access", CAUSE_LOAD_ACCESS) +DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE) +DECLARE_CAUSE("store access", CAUSE_STORE_ACCESS) +DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL) +DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL) +DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL) +DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL) +DECLARE_CAUSE("fetch page fault", CAUSE_FETCH_PAGE_FAULT) +DECLARE_CAUSE("load page fault", CAUSE_LOAD_PAGE_FAULT) 
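+/* cause 0xe is reserved in the privileged spec, hence the gap between
+   CAUSE_LOAD_PAGE_FAULT (0xd) and CAUSE_STORE_PAGE_FAULT (0xf) */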
+DECLARE_CAUSE("store page fault", CAUSE_STORE_PAGE_FAULT) +#endif diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile new file mode 100644 index 00000000..839a68e5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile @@ -0,0 +1,17 @@ + +TOT = .. +gengen = $(TOT)/gengen_tool/gengen + +default: + rm -f test_*.h + $(gengen) -i test_pmp_ok_1.cc_skel --file-name test_pmp_ok_1.h --gen-name pmp_ok_1 + $(gengen) -i test_pmp_csr_1.cc_skel --file-name test_pmp_csr_1.h --gen-name pmp_csr_1 + $(gengen) -i test_pmp_ok_share_1.cc_skel --file-name test_pmp_ok_share_1.h --gen-name pmp_ok_share_1 + +gen: + -rm -rf outputs; mkdir outputs + g++ -g3 -O2 gen_pmp_test.cc -o a.out + ./a.out + +clean: + rm -rf test_*.h a.out outputs/* diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc new file mode 100644 index 00000000..e464aa1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc @@ -0,0 +1,379 @@ +/* + * insn_utest-1.cc + * + * Created on: Mar.6 2020 + * Author: soberl + */ +#include +#include +#include +#include +#include + +#include +#include + +#include "test_pmp_ok_1.h" +#include "test_pmp_ok_share_1.h" +#include "test_pmp_csr_1.h" + +#define GEN_ALL 1 + +namespace { + +const unsigned expected_files_count[] = { + 256 - 64, + 528, + 24, + 0 +}; + + std::ostringstream str_buffer, val_buffer; + std::ofstream m_ofstream; + unsigned cur_files_count = 0; + unsigned cur_expected_errors = 0; + + const int max_pmp = 16; // from spike + const int max_pmp_cfg = max_pmp / 8; // for RV64 +}; + + + +int +main() +{ +#if GEN_ALL + pmp_ok_1_gen_class gen_class_1; + + for (int u_mode = 0; u_mode < 2; u_mode++) { + for (int r = 0; r < 2; r++) { + for (int w = 0; w < 2; w++) { + for (int x = 0; x < 2; x++) { + for (int cfgl = 0; cfgl < 2; cfgl++) { + for (int pmp_match = 0; pmp_match < 2; pmp_match++) { + for (int mmwp = 0; mmwp < 2; mmwp++) { + for (int mml = 0; mml < 2; mml++) { + /* + * For RW=01, + * - mml == 1, test in pmp_ok_share_1 + * - mml == 0, reserved. + */ + if (r == 0 && w == 1) continue; + + str_buffer.str(""); + str_buffer << "outputs/test_pmp_ok_1_u" << u_mode << "_rw" << r << w << "_x" << x << "_l" << cfgl + << "_match" << pmp_match << "_mmwp" << mmwp << "_mml" << mml << ".c"; + m_ofstream.open(str_buffer.str().c_str()); + cur_files_count++; + + gen_class_1.set_tag(str_buffer.str()); + + unsigned rw_err = 0; + unsigned x_err = 0; + + gen_class_1.set_switch_u_mode(u_mode); + gen_class_1.set_pmp_r(r); + gen_class_1.set_pmp_w(w); + gen_class_1.set_pmp_x(x); + gen_class_1.set_pmp_l(cfgl); + + if (mml) { + gen_class_1.set_m_mode_rwx(0); + } else { + gen_class_1.set_m_mode_rwx(cur_files_count % 3 == 0 ? 
+            gen_class_1.set_m_mode_rwx(cur_files_count % 3 == 0 ? 1 : 0);
+          }
+
+          gen_class_1.set_set_sec_mmwp(mmwp);
+          gen_class_1.set_set_sec_mml(mml);
+
+          if (pmp_match) {
+            gen_class_1.set_create_pmp_cfg(pmp_match);
+            gen_class_1.set_pmp_addr_offset(0);
+            if (mml) {
+              if (cfgl && r && w && x) {  // 2nd version, XWRL-MML is shared read-only
+                rw_err = 1;
+                x_err = 1;
+              } else {
+                if (1 - u_mode != cfgl) {
+                  rw_err = 1;
+                  x_err = 1;
+                }
+                if (r == 0 || w == 0) rw_err = 1;
+                if (x == 0) x_err = 1;
+              }
+            } else {
+              if (u_mode == 1 || cfgl) {
+                if (r == 0 || w == 0) rw_err = 1;
+                if (x == 0) x_err = 1;
+              }
+            }
+          } else {
+            if (cur_files_count % 3 == 0) {
+              gen_class_1.set_create_pmp_cfg(1);
+              gen_class_1.set_pmp_addr_offset(0x100);  // >= sizeof(.test) section
+            } else {
+              gen_class_1.set_create_pmp_cfg(0);
+            }
+            if (u_mode == 1 || mmwp) {  // MMWP makes non-matching accesses fail
+              rw_err = 1;
+              x_err = 1;
+            } else if (mml == 1) {
+              x_err = 1;
+            }
+          }
+
+          cur_expected_errors += rw_err + x_err;
+          gen_class_1.set_expected_rw_fail(rw_err);
+          gen_class_1.set_expected_x_fail(x_err);
+
+          str_buffer.str("");
+          gen_class_1.generate_pmp_ok_1(str_buffer, 0);
+          str_buffer << std::endl;
+          m_ofstream << str_buffer.str();
+          m_ofstream.close();
+  }
+  }
+  }
+  }
+  }
+  }
+  }
+  }
+#endif
+
+#if GEN_ALL
+  pmp_csr_1_gen_class gen_class_2;
+
+  for (int pmp_lock = 0; pmp_lock < 2; pmp_lock++) {
+  for (int lock_once = 0; lock_once < 2; lock_once++) {
+    if (pmp_lock == 1 && lock_once == 1) continue;  // locked once anyway
+  for (int pre_rlb = 0; pre_rlb < 2; pre_rlb++) {
+  for (int pre_mmwp = 0; pre_mmwp < 2; pre_mmwp++) {
+  for (int pre_mml = 0; pre_mml < 2; pre_mml++) {
+  for (int test_pmp = 0; test_pmp < 2; test_pmp++) {
+  for (int idx = 0; idx < 2; idx++) {
+    if (test_pmp == 0 && idx == 1) continue;  // only 1 seccfg
+  for (int val = 0; val < 8; val++) {
+    if (val == 0 && test_pmp) continue;  // skip, since no change
+#if TEST_RW01_ONLY
+    if (test_pmp) {
+      if ((idx == 0 && (val & 0x3) == 0x1) || (idx == 1 && (val & 0x3) == 0x2)) {
+        // test RW=01;
+      } else {
+        continue;
+      }
+    }
+#endif
+
+          str_buffer.str("");
+          str_buffer << "outputs/test_pmp_csr_1_lock" << pmp_lock << lock_once
+                  << "_rlb" << pre_rlb << "_mmwp" << pre_mmwp << "_mml" << pre_mml
+                  << (test_pmp ? "_pmp_" : "_sec_") << idx << val
+                  << ".c";
+          m_ofstream.open(str_buffer.str().c_str());
+          cur_files_count++;
+
+          gen_class_2.set_tag(str_buffer.str());
+
+          gen_class_2.set_m_mode_rwx(0);
+          gen_class_2.set_pmp_lock(pmp_lock);
+          gen_class_2.set_lock_once(lock_once);
+
+          gen_class_2.set_lock_bypass(pre_rlb);
+          gen_class_2.set_pre_sec_mml(pre_mml);
+          gen_class_2.set_pre_sec_mmwp(pre_mmwp);
+
+          gen_class_2.set_group_pmp(test_pmp);
+
+          int pmpcfg_fail = 0;
+          int pmpaddr_fail = 0;
+          int seccfg_fail = 0;
+
+          if (test_pmp == 1) {  // pmpcfg and pmpaddr test
+            gen_class_2.set_revert_rwx(val);
+
+            if (idx == 0) {  // for cfg2 and cfg3, since PMP_L might be set there
+              int sub_idx = 2 + cur_files_count % 2;
+              gen_class_2.set_addr_idx(sub_idx);
+              gen_class_2.set_addr_offset(0);
+              gen_class_2.set_cfg_idx(0);
+              gen_class_2.set_cfg_sub_idx(sub_idx);
+
+              if (pmp_lock && !pre_rlb) {
+                pmpcfg_fail = 1;
+                pmpaddr_fail = 1;
+              } else {
+                // RW=01 is not allowed for MML==0
+                if (!pre_mml && (val & 0x3) == 0x1) {  // rw bits b'11 ^ b'01 = b'10, i.e. RW=01
+                  pmpcfg_fail = 1;
+                }
+
+                /*
+                 * Adding a rule with executable privileges that either is M-mode-only or a locked Shared-Region
+                 * is not possible and such pmpcfg writes are ignored, leaving pmpcfg unchanged.
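+                 * The check below encodes that rule: with RLB clear, MML set,
+                 * and the resulting L bit set, a write is dropped when it sets
+                 * X ((rwx & 0x4) == 0x4) or sets W without R ((rwx & 0x3) == 0x2),
+                 * except for the LRWX=1111 shared read-only encoding (rwx == 7).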
+ */ + bool set_PMP_L = (lock_once != pmp_lock); + unsigned rwx = 7 ^ val; + if (!pre_rlb && pre_mml && set_PMP_L && (rwx != 7) && ((rwx & 0x4) == 0x4 || (rwx & 0x3) == 0x2)) { + pmpcfg_fail = 1; + } + } + } else { // for invalid cfgs, start from 7 + gen_class_2.set_addr_idx(7 + cur_files_count % (max_pmp - 7)); + gen_class_2.set_addr_offset(0x10000); + gen_class_2.set_cfg_idx((1 + cur_files_count % (max_pmp_cfg - 1)) * 2); // for 2, 4, ..., 14 + gen_class_2.set_cfg_sub_idx((cur_files_count >> val) % 4); + if (!pre_mml && (val & 0x3) == 0x2) { // b'00^10 = 10, RW=01 + pmpcfg_fail = 1; + } + /* + * PMP_L cases with default LRWX=0000 + */ + bool set_PMP_L = (lock_once != 0); + unsigned rwx = 0 ^ val; + if (!pre_rlb && pre_mml && set_PMP_L && (rwx != 7) && ((rwx & 0x4) == 0x4 || (rwx & 0x3) == 0x2)) { + pmpcfg_fail = 1; + } + } + + if (pmpcfg_fail || pmpaddr_fail) cur_expected_errors += 1; + } else { // seccfg test + unsigned sec_val = val; + unsigned sec_rlb = (sec_val >> 2) & 0x1; + unsigned sec_mml = (sec_val >> 0) & 0x1; + unsigned sec_mmwp = (sec_val >> 1) & 0x1; + gen_class_2.set_sec_rlb(sec_rlb); + gen_class_2.set_sec_mml(sec_mml); + gen_class_2.set_sec_mmwp(sec_mmwp); + } + + gen_class_2.set_expected_seccfg_fail(seccfg_fail); + gen_class_2.set_expected_pmpaddr_fail(pmpaddr_fail); + gen_class_2.set_expected_pmpcfg_fail(pmpcfg_fail); + + str_buffer.str(""); + gen_class_2.generate_pmp_csr_1(str_buffer, 0); + str_buffer << std::endl; + m_ofstream << str_buffer.str(); + m_ofstream.close(); + } + } + } + } + } + } + } + } +#endif + +#if GEN_ALL + pmp_ok_share_1_gen_class gen_class_3; + for (int r = 0; r < 2; r++) { + for (int x = 0; x < 2; x++) { + for (int cfgl = 0; cfgl < 2; cfgl++) { + for (int typex = 0; typex < 2; typex++) { + for (int umode = 0; umode < 2; umode++) { + // not share mode and M mode + if (r == 1 && umode == 0) continue; + + str_buffer.str(""); + str_buffer << "outputs/test_pmp_ok_share_1_r" << r << "_x" << x << "_cfgl" << cfgl + << "_typex" << typex << "_umode" << umode << ".c"; + m_ofstream.open(str_buffer.str().c_str()); + cur_files_count++; + + gen_class_3.set_tag(str_buffer.str()); + + unsigned r_err = 0; + unsigned w_err = 0; + unsigned x_err = 0; + + gen_class_3.set_pmp_r(r); + gen_class_3.set_pmp_x(x); + gen_class_3.set_pmp_l(cfgl); + gen_class_3.set_typex(typex); + gen_class_3.set_enable_umode_test(umode); + + if (r != 0) { // not share mode + // 2nd version, XWRL-MML is shared read-only + if (x && cfgl) { + if (typex == 0) { + w_err = 1; + } else { + x_err = 1; + } + } else { + if (typex == 0) { + r_err = 1; + w_err = 1; + } else { + x_err = 1; + } + } + } else { + if (cfgl) { + if (typex == 0) { + if (x == 0) { + // no RW access + r_err = 1; + w_err = 1; + } else { + // readable for M mode + if (umode) { + r_err = 1; + w_err = 1; + } else { + w_err = 1; + } + } + } else { + // always executable + } + } else { + if (typex == 0) { + if (x == 0) { + // RW M mode, R for U + if (umode) { + w_err = 1; + } + } + } else { + x_err = 1; // when !cfgl, not executable + } + } + } + + cur_expected_errors += r_err + w_err + x_err; + gen_class_3.set_expected_r_fail(r_err); + gen_class_3.set_expected_w_fail(w_err); + gen_class_3.set_expected_x_fail(x_err); + + str_buffer.str(""); + gen_class_3.generate_pmp_ok_share_1(str_buffer, 0); + str_buffer << std::endl; + m_ofstream << str_buffer.str(); + m_ofstream.close(); + } + } + } + } + } +#endif + +#if GEN_ALL + unsigned expectedCount = 0; + for (int i=0; i> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: 
"r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? 
MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..d883f97a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
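+ * (the skeleton comment below is stale for this variant: handle_trap() here
+ * just calls tohost_exit(1337), i.e. any trap is fatal)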
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
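+ * The probes below are write-then-read-back: csrw a changed value, csrr it
+ * back, and any mismatch means the write was silently ignored (e.g. by a
+ * locked entry); that outcome is latched in actual_pmpaddr_fail /
+ * actual_pmpcfg_fail and compared against the generator's expectations.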
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..9d1edcb6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
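+ * - The magic CSR number 0x747 in the asm below is mseccfg (Smepmp),
+ *   written by number presumably so the test does not depend on
+ *   assembler support for the CSR name.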
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
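+ *
+ * pmpaddr registers hold bits [XLEN+1:2] of the byte address, hence the
+ * ">> 2" below: e.g. TEST_MEM_START 0x200000 is written as 0x80000, and
+ * a TOR entry i then matches [pmpaddr(i-1) << 2, pmpaddr(i) << 2).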
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..a0fb1a17 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
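+ * (exit(ret) encodes failures as a bitmask: bit 0 = seccfg, bit 1 =
+ * pmpaddr, bit 2 = pmpcfg, so exit code 6, say, means both the pmpaddr
+ * and pmpcfg checks mismatched while the seccfg check passed.)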
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
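+ *
+ * For a TOR entry, rval << 1 is simply another valid top-of-range
+ * address; for a NAPOT entry, ((rval + 1) << 1) - 1 extends the
+ * trailing-ones size mask by one bit (e.g. ...0111 -> ...01111),
+ * doubling the region while keeping the encoding well formed.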
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..7a41bc05 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
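+ * - Unlike the earlier pmp_0x variants, this one presets
+ *   expected_pmpcfg_fail = 1: the cfg XOR below yields an R=0/W=1 byte,
+ *   a combination the spec reserves while MML=0, so the write is
+ *   expected not to stick.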
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
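+ *
+ * cfg1 packs one byte per entry: bits [15:8] become pmp5cfg (X|TOR,
+ * code) and bits [23:16] pmp6cfg (R|W|TOR, data) once shifted into the
+ * upper half of pmpcfg0 on RV64, where odd-numbered pmpcfg CSRs do not
+ * exist.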
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..a3179e91 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
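+ * (printf() is compiled out unless PRINTF_SUPPORTED is defined, so the
+ * diagnostics below are best-effort; pass/fail travels only through the
+ * exit code.)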
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
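+ *
+ * XOR-ing (value << (idx * 8)) flips bits only inside entry idx's cfg
+ * byte, so the write-and-readback below leaves the other seven entries
+ * packed in pmpcfg0 untouched.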
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..2655a688 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
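+ * - The pmp_0x variants appear to differ only in the generator
+ *   parameters: which pmpaddr/pmpcfg entry is exercised and which value
+ *   is XOR'd into its cfg byte; the rest is shared skeleton.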
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
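+ *
+ * (RLB, set at the top of set_cfg, is Smepmp's Rule-Lock Bypass: while
+ * it is 1, M-mode may still edit entries whose L bit is set, which is
+ * what lets this function program the entries unconditionally.)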
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..479a5d4e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
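+ * (Starting with pmp_11 the target moves to the second cfg bank,
+ * pmpaddr8 and pmpcfg2, entries that set_cfg below leaves at their
+ * reset values.)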
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
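+ *
+ * Entry 8 is presumably still 0/OFF here, so a bare rval << 1 would
+ * write back the old value; adding 65536 (0x10000) guarantees the
+ * written value actually differs before the readback compare.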
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..aa05f33b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
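+ * - Like pmp_05, this variant presets expected_pmpcfg_fail = 1: XOR-ing
+ *   2 into an all-zero cfg byte would produce W=1 with R=0, reserved
+ *   while MML=0, so the readback is expected to mismatch.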
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked; pmp_lock means cfg2/3 are locked;
+ * sec_mml is the coverage hole mentioned above.
+ */
+ if ((0 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+ exit(ret);
+}
+
+int main() {
+ // assumes entry in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // expected exit code is 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c
new file mode 100644
index 00000000..218484f0
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access behavior once mseccfg is
+ * introduced. It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0; otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
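+ *
+ * The probes below all follow one write/read-back pattern: read the CSR,
+ * derive a legal new value, write it, read it back, and flag a mismatch.
+ * A standalone sketch of that pattern (probe_ok is illustrative, not a
+ * helper in this repo):
+ *
+ *   static int probe_ok(reg_t wval, reg_t rval) {
+ *     return wval == rval; // write stuck; callers decide if that was expected
+ *   }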
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..c5b3341d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
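+ * - mseccfg has CSR number 0x747; these tests read and write it by number,
+ *   presumably because the assembler in use does not know it by name. Its
+ *   low three bits are MML (0x1), MMWP (0x2) and RLB (0x4), matching the
+ *   MSECCFG_* defines below.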
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
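+ *
+ * NAPOT reminder (hedged summary of the standard encoding; napot_encode is
+ * illustrative, not repo code):
+ *
+ *   static reg_t napot_encode(reg_t base, reg_t size) { // size: 2^k, k >= 3
+ *     return (base >> 2) | ((size >> 3) - 1);
+ *   }
+ *
+ * e.g. the M_MODE_RWX arm writes (TEST_MEM_START >> 3) - 1 to pmpaddr0,
+ * which is napot_encode(0, TEST_MEM_START): the region [0, 0x200000).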
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..1332f9b8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
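+ *
+ * On the probe value below: entry index 0 would get a NAPOT-style all-ones
+ * low mask, ((rval + 1) << 1) - 1, while any other entry gets
+ * (rval << 1) + 65536 so the result stays a plausible TOR upper bound.
+ * Worked example, under the assumption that pmpaddr12 resets to 0:
+ * wval = 0x10000, i.e. byte address 0x40000 once scaled by 4.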
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..a4af689e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
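+ * - The process exit code encodes the verdict bitwise: +1 for an mseccfg
+ *   mismatch, +2 for pmpaddr, +4 for pmpcfg (see checkTestResult()), with 0
+ *   meaning all observations matched; an unexpected trap exits with 1337.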
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
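+ *
+ * Layout note: on RV64 only even-numbered pmpcfg CSRs exist and each packs
+ * eight 8-bit entry configs, which is why cfg1 (entries 4-7) is merged
+ * below via cfg0 |= cfg1 << 32 instead of a pmpcfg1 write. A hedged sketch
+ * of the per-entry packing (set_entry_cfg is illustrative, not repo code):
+ *
+ *   static reg_t set_entry_cfg(reg_t csr, int i, unsigned char cfg) {
+ *     return (csr & ~((reg_t)0xff << (8 * i))) | ((reg_t)cfg << (8 * i));
+ *   }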
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..9bb9768a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
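+ *
+ * The pmpcfg probe below re-reads the live pmpcfg2 value and XORs a small
+ * constant into one byte lane: cfg ^ ((reg_t)7 << (1 * 8)) flips R/W/X of
+ * entry 9 (the second byte of pmpcfg2 on RV64) while leaving the other
+ * seven entries untouched, so the read-back isolates one entry's behavior.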
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..a1f8c655 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
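+ * - Naming note, inferred from these generated sources rather than stated
+ *   anywhere: the pmp_NN variants compile the pmpaddr/pmpcfg probe (the
+ *   #if 1 arm of the test target), while sec_NN variants like this one
+ *   compile the mseccfg probe in the #else arm (#if 0 here).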
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
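+ *
+ * RLB background (hedged summary of the Smepmp extension, not generator
+ * text): mseccfg.RLB, Rule Locking Bypass, lets M mode modify or remove
+ * locked PMP rules while it is set; once any entry is locked while RLB is
+ * 0, further attempts to set RLB are ignored.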
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..4e5a02c7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
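+ *
+ * This sec variant sets mseccfg.MML (Machine Mode Lockdown). Hedged spec
+ * summary: MML is sticky (software can set but never clear it) and
+ * redefines the pmpcfg permission encoding so that locked, M-mode-only
+ * rules become expressible; that is why the #else arm below first sets
+ * PMP_L on the executing region before writing mseccfg.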
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..8c483335 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
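+ * - The mseccfg probe models the expected write behavior: expected_val
+ *   starts as the written bits, RLB is cleared when the generator's
+ *   constant-folded guards say locked entries exist with RLB clear (a
+ *   hedged reading of the (0 || 0 || ...) conditions below), and sticky
+ *   MML/MMWP bits are OR-ed back in; any mismatch sets actual_seccfg_fail.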
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..81d3b00f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..c99a9d66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..3b4512b1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..5199b7d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..e5251d87 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..b68e7a43 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..733bad9a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * it does not skip to the next instruction: any trap simply ends the
+ * test via tohost_exit(), since no trap is expected in this variant.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * Set MSECCFG_RLB so that entries are not locked at the start.
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+ * Also use pmp3cfg for the fixed U-mode region (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // the L bit is needed for M-mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * PMP and seccfg accesses need to be separated, since the
+ * pmplock_recorded status may be updated again when pmpcfg is accessed.
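+ * The pattern below reads the CSR, derives a new value that stays legal
+ * whether the entry is decoded as NAPOT or TOR, writes it, and reads it
+ * back. Nothing exercised here is locked, so the write is expected to
+ * take effect, and a readback mismatch is recorded as a failure.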
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..573f06dd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
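+ * - On RV64 each pmpcfg CSR packs eight 8-bit entry configurations and
+ *   only even-numbered pmpcfg CSRs exist, which is why the code below
+ *   folds cfg1 into the upper half of pmpcfg0 (cfg0 |= cfg1 << 32).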
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
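+ * With A=TOR, entry i covers addresses [pmpaddr(i-1) << 2, pmpaddr(i) << 2),
+ * so pmp1cfg stays OFF and only supplies the base address for pmp2cfg,
+ * while pmpaddr3 extends the U-mode window from TEST_MEM_END to U_MEM_END.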
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..9407c487 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..b7ebe3f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
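+ * A pmpaddr CSR holds a physical address shifted right by 2, hence the
+ * ">> 2" on every byte address below. For a NAPOT entry, the trailing
+ * one bits encode the size, so a naturally aligned power-of-two region
+ * is written as (base >> 2) | ((size >> 3) - 1); that is where the
+ * expression "(TEST_MEM_START >> 3) - 1" for pmpaddr0 comes from
+ * (a NAPOT region from address 0 up to TEST_MEM_START).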
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..1940f3c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
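+ * In this variant mseccfg is programmed with MML only (RLB and MMWP
+ * clear) and none of the entries touched below carry PMP_L, so every
+ * CSR update is expected to succeed; accordingly, all of the
+ * expected_*_fail flags above are zero.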
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..45615800 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
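+ * Note that under MML a PMP_L rule becomes an M-mode rule, which is why
+ * the L bit is set on the code and data entries below: without it,
+ * M-mode fetches would fault once MSECCFG_MML is written further down.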
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..c433b2fb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
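+ * This variant exercises a high-numbered entry: on RV64, pmpaddr12 is
+ * configured through a byte lane of pmpcfg2, which holds entries 8-15,
+ * rather than pmpcfg0.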
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..59651e35 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
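+ * The CSR number 0x747 used below is mseccfg as assigned by the Smepmp
+ * extension; it is presumably written by number because the assembler
+ * may not recognize the name. Bit 0 is MML, bit 1 MMWP, and bit 2 RLB,
+ * matching the MSECCFG_* macros above.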
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..af7fbde8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..e5b59a56 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..49d57ec5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..d2f33b0a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..3e9ea53c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..a575841a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..d167644b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..e98e24c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
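+ * - mseccfg is addressed numerically as CSR 0x747 throughout, presumably so
+ *   the test assembles even with toolchains that predate the Smepmp names.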
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
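+ * Resulting TOR regions: pmp2 covers [TEST_MEM_START, TEST_MEM_END) and
+ * pmp3 covers [TEST_MEM_END, U_MEM_END); pmp1 stays PMP_OFF and only
+ * supplies the TOR base address for pmp2.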
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..44ee2ea2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
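+ * (With MML in force, a pmpcfg write may set further lock bits, which in
+ * turn changes what a subsequent mseccfg write is permitted to do.)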
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..23b4f19d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
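+ * - checkTestResult() folds the three pass/fail flags into the exit code:
+ *   bit 0 seccfg, bit 1 pmpaddr, bit 2 pmpcfg; 0 means the test passed.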
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
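+ * pmp4..pmp6 (in cfg1) map the M-mode code and data around 0x80000000;
+ * pmp5/pmp6 get the L bit below so M mode keeps access under MSECCFG_MML.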
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..ca759321 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
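+ * (Each probe below pairs the csrw with its csrr read-back inside a single
+ * asm block, so no other CSR access can be scheduled between the two.)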
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..bcec5531 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
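+ * - Constant conditions such as "if (1)" are gengen parameters substituted
+ *   at generation time; see the @...@ markers kept in the comments.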
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
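+ * In the M_MODE_RWX variant, (TEST_MEM_START >> 3) - 1 is the NAPOT
+ * encoding of the region [0, TEST_MEM_START).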
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..a9856777 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
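+ * (Mismatches are recorded in the actual_*_fail flags rather than trapping,
+ * then compared against the expected_*_fail constants in checkTestResult().)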
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..a7880143 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
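+ * - The file name appears to encode the scenario: mmwp1/mml0 is the mseccfg
+ *   pre-state set up below, and pmp_NN variants take the pmpaddr/pmpcfg
+ *   probe branch of the test target (sec_NN variants take the mseccfg one).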
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
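+ * pmp3 is also the entry rewritten by the test target below (the pmpaddr3
+ * write and the byte-3 toggle of pmpcfg0).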
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked
+ * pmp_lock means cfg2/3 are locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((0 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c
new file mode 100644
index 00000000..ee4577d1
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether RWX share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Overrides handle_trap from syscalls.c;
+ * any unexpected trap simply exits via tohost_exit.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg accesses since the pmplock_recorded status
+ * may be updated again when accessing pmpcfg.
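+ * The test below is a plain write/read-back check: with only MMWP set and
+ * no entry locked, the pmpaddr/pmpcfg writes should land, and a read-back
+ * mismatch is recorded in the actual_*_fail flags.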
+ */
+ reg_t wval = 0, rval;
+#if 1
+ asm volatile ("csrr %0, pmpaddr2 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (2 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 0;
+ }
+ asm volatile ("csrw pmpaddr2, %1 \n"
+ "\tcsrr %0, pmpaddr2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 so that no idx other than 2 is changed
+ asm volatile ("csrr %0, pmpcfg0 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8));
+ asm volatile ("csrw pmpcfg0, %1 \n"
+ "\tcsrr %0, pmpcfg0 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+ * This is a small coverage hole for non-PMP_L + mml, which should be
+ * a restricted use case and is acceptable anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked
+ * pmp_lock means cfg2/3 are locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((0 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c
new file mode 100644
index 00000000..dbcd878a
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
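+ * pmpaddr registers hold physical addresses pre-shifted right by two
+ * (PMP_SHIFT), hence the ">> 2" on every address written below.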
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..981f5226 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
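+ * On RV64, pmpcfg0 packs the 8-bit configs of entries 0..7, so the XOR of
+ * a value shifted by (idx * 8) below flips bits in exactly one cfg byte.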
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..1f8ad6a8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
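+ * - The constant-folded conditionals below (e.g. "if (0)") are generator
+ *   parameters baked into this output; they are intentional, not dead code.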
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..88491bd9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
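+ * In this variant the mseccfg write above sets only MMWP (RLB and MML are
+ * zero) and no entry is locked, so all expected_*_fail constants stay zero.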
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..6bc196ed --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..a6cd00c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
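+ * Unlike the lower-index variants, this one probes pmpaddr7 and pmpcfg2,
+ * i.e. entries beyond those configured above; with nothing locked these
+ * writes should still take effect.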
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..06a2670c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
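+ * As a reading aid: pmpaddrN holds the address right-shifted by 2 (4-byte
+ * granules), hence the ">> 2" below; TEST_MEM_START 0x200000 becomes 0x80000.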
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..90d9f9d2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
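+ *
+ * For pmpaddr the test writes (rval << 1) + 65536, an arbitrary but still
+ * plausible TOR boundary, so a read-back mismatch should only mean the
+ * write was blocked, not that a WARL field legalized the value (a best-effort
+ * reading of the generated pattern).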
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..2a78d1c3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
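+ * - checkTestResult() folds any mismatch into the exit code as a bitmask:
+ *   bit 0 = seccfg, bit 1 = pmpaddr, bit 2 = pmpcfg; 0 means all passed.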
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
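+ * Note on TOR: an entry with A=TOR matches [pmpaddr(i-1) << 2, pmpaddr(i) << 2),
+ * and the preceding entry only donates its address, which is why pmp1cfg can
+ * stay OFF and still act as the base of pmp2cfg's range.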
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..e56ef606 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
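+ *
+ * On RV64 pmpcfg2 packs pmp8cfg..pmp15cfg one byte each, so the "<< (0 * 8)"
+ * XOR below flips R and X (value 5) of pmp8cfg; that is a legal combination,
+ * matching expected_pmpcfg_fail == 0 above.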
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..18ffa1d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
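+ * - This variant XORs value 6 (W and X) into a cfg byte; with MML clear,
+ *   W without R is a reserved combination, so the write is not expected to
+ *   be stored as written (hence expected_pmpcfg_fail = 1 below).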
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
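+ * For the M_MODE_RWX alternative below, (TEST_MEM_START >> 3) - 1 == 0x3ffff
+ * is the NAPOT encoding of [0, 0x200000): (base | (size/2 - 1)) >> 2.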
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..9f797567 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
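+ *
+ * Here the XOR value is 7 (R|W|X), always a legal combination; with RLB
+ * clear but no lock bit set on the entry, the write should still take
+ * effect, matching expected_pmpcfg_fail == 0 above.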
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..02b2015d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
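+ * - Unlike the pmp_* variants, this one compiles the "#else" branch of the
+ *   test target and probes the mseccfg write path rather than a pmp entry.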
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
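+ * Net layout with M_MODE_RWX == 0: pmp1cfg stays OFF as a TOR base,
+ * pmp2cfg covers TEST_MEM (TOR, RWX), pmp3cfg covers U_MEM (TOR, RWX), and
+ * on RV64 cfg1 is merged into the upper bytes of pmpcfg0 for entries 4..6.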
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..6e863fde --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently it simply aborts via tohost_exit().
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate the pmp and seccfg accesses, since the pmplock_recorded
+ * status may be updated again when accessing pmpcfg.
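+ * (Illustrative reading of the expected_val logic further below:
+ * writing pmpcfg with PMP_L set records a lock, which can in turn
+ * mask a later attempt to set mseccfg.RLB.)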
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..60438cce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
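+ * - The exit code is a bit mask: 1 = seccfg mismatch, 2 = pmpaddr
+ *   mismatch, 4 = pmpcfg mismatch (see checkTestResult()).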
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
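+ * For reference, the TOR windows written below are (each pmpaddr CSR
+ * holds the address >> PMP_SHIFT):
+ *   pmpaddr1..pmpaddr2 : TEST_MEM_START..TEST_MEM_END (R/W/X, test code/data)
+ *   pmpaddr2..pmpaddr3 : TEST_MEM_END..U_MEM_END (R/W/X, U mode)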
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..5ebd6d4b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
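+ * The ternary constants in the probe below are filled in by the
+ * generator; mseccfg is read back via its raw CSR number 0x747.
+ * MMWP is expected to stick once set (assumption: Smepmp defines it
+ * as sticky until reset), which is why the expected_val logic below
+ * always forces it back in.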
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..0a304bcc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..bd160f8b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
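+ * In this case the probe requests RLB together with MML; since a
+ * locked configuration is already in effect here, the read-back is
+ * expected to drop RLB again (see the expected_val adjustment below).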
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..ce84df30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..331bb3a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..e612d663 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
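+ * - This variant probes pmpaddr3/pmpcfg0 write access with MML and
+ *   MMWP set (the "#if 1" branch of the test target below).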
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
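+ * In this variant the code/data TOR entries in cfg1 additionally get
+ * PMP_L (the "if (1)" below): as the comment there notes, the L bit
+ * is needed for M mode code access once MML is set.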
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..cddb2aef --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");   // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");   // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");   // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) {   // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24;   // for U_MEM
+    cfg0 |= sub_cfg << 16;   // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24);   // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (1 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
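+     * As an illustrative example of the write below: a TOR entry is rewritten
+     * to twice its read value (pmpaddr2 0x90000 becomes 0x120000), while
+     * index 0 would use ((rval + 1) << 1) - 1, again a valid NAPOT pattern.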
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..25f810cf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
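+ * - mseccfg is referred to by its CSR number (0x747) throughout; presumably
+ *   the generator avoids the name in case the assembler does not know it.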
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
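+ * (Illustrative: pmpaddrN holds the byte address shifted right by
+ * PMP_SHIFT (2), so U_MEM_END 0x250000 is written as 0x94000 below.)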
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..4dbde5ff --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
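+ * As an illustrative example of the write below: a TOR entry is rewritten
+ * to twice its read value (pmpaddr2 0x90000 becomes 0x120000), while
+ * index 0 would use ((rval + 1) << 1) - 1, again a valid NAPOT pattern.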
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..931ddd4a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
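+ * - mseccfg is referred to by its CSR number (0x747) throughout; presumably
+ *   the generator avoids the name in case the assembler does not know it.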
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
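+ * (Illustrative: pmpaddrN holds the byte address shifted right by
+ * PMP_SHIFT (2), so U_MEM_END 0x250000 is written as 0x94000 below.)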
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..d0f64cba --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
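+ * As an illustrative example of the write below: a TOR entry is rewritten
+ * to twice its read value (pmpaddr2 0x90000 becomes 0x120000), while
+ * index 0 would use ((rval + 1) << 1) - 1, again a valid NAPOT pattern.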
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..a4b1d42b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
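+ * - mseccfg is referred to by its CSR number (0x747) throughout; presumably
+ *   the generator avoids the name in case the assembler does not know it.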
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
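+ * (Illustrative: pmpaddrN holds the byte address shifted right by
+ * PMP_SHIFT (2), so U_MEM_END 0x250000 is written as 0x94000 below.)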
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..90549a1a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
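+ * As an illustrative example of the write below: the TOR value read from
+ * pmpaddr11 is shifted left by one and the generated offset 65536 (0x10000)
+ * is added; index 0 would use ((rval + 1) << 1) - 1, a valid NAPOT pattern.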
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..f08995c5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
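+ * - On RV64 only the even-numbered pmpcfg CSRs exist: pmpcfg0 packs
+ *   entries 0-7 and pmpcfg2 packs entries 8-15, eight 8-bit fields per
+ *   register. That is why set_cfg() folds cfg1 into the upper half of
+ *   cfg0 (cfg0 |= cfg1 << 32) instead of writing pmpcfg1.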
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
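+ *
+ * TOR arithmetic for reference (pmpaddr holds the address >> 2):
+ * entry 2 matches pmpaddr1 << 2 <= addr < pmpaddr2 << 2, i.e.
+ * TEST_MEM = [0x200000, 0x240000), and entry 3 extends the match
+ * range for U mode up to U_MEM_END = 0x250000.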
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..b9a065a1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
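+ *
+ * About the probe value built below: for index 0 the skeleton would
+ * form a NAPOT pattern, ((rval + 1) << 1) - 1, which shifts the old
+ * value up one bit and appends a trailing 1 (doubling the NAPOT
+ * region); for any other index, as with pmpaddr13 here, it writes
+ * (rval << 1) + 65536, simply a different but still legal TOR bound.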
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..b4ec9369 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
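+ * - The literal 0x747 in the csrw/csrr sequences is the mseccfg CSR
+ *   address defined by Smepmp; the numeric form is used, presumably
+ *   because the assembler does not yet know the symbolic name.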
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
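+ *
+ * Note that cfg1 below locks the two TOR entries (5 for code, 6 for
+ * data) by OR-ing PMP_L into its bytes 1 and 2 before the whole value
+ * is folded into the upper half of pmpcfg0 on RV64; keeping those
+ * entries locked is what keeps M-mode fetches legal once MML is set.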
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..d0f317ff --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
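+ *
+ * The pmpcfg2 check below is a read-modify-toggle: it XORs a small
+ * constant into a single byte (entry 8, byte 0 of pmpcfg2, in this
+ * variant), writes the result back and expects to read it unchanged,
+ * i.e. an unlocked entry must remain freely writable even with
+ * MML/MMWP already set.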
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..4fc45e67 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
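+ * - Any unexpected trap lands in handle_trap(), whose body only calls
+ *   the noreturn tohost_exit(1337), so in practice a trap aborts the
+ *   run rather than being skipped.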
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
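+ *
+ * sub_cfg below grants R|W|X TOR without PMP_L to both TEST_MEM
+ * (entry 2) and U_MEM (entry 3); broadly speaking, once MML is set an
+ * unlocked entry governs U-mode accesses only, and the (0 || 0) term
+ * is the generator's hook for optionally locking these entries too.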
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..f001df23 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..f64da1b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
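+ * - The sec_* variants flip the generated "#if" in the test-target
+ *   block to 0: instead of probing pmpaddr/pmpcfg they rewrite mseccfg
+ *   itself and compare the read-back against a model of its WARL
+ *   behaviour.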
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
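+ *
+ * Expected read-back model used further below (a sketch of the WARL
+ * rules as this generator encodes them): a write that tries to raise
+ * RLB is dropped once a locked entry exists and RLB was already 0,
+ * while MML and MMWP are sticky, so having been set by set_cfg() they
+ * read back as 1 regardless of what the probe writes.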
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked
+ * pmp_lock means cfg2/3 are locked
+ * sec_mml is the test coverage hole mentioned above
+ */
+ if ((1 || 0 || 0)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c
new file mode 100644
index 00000000..148f7eed
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0; otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Overrides the handler in syscalls.c.
+ * No trap is expected in this configuration, so any trap fails the test.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * Set MSECCFG_RLB so that entries are not locked at the start.
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // the L bit needs to be set for M-mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * PMP and seccfg accesses need to be separated, since the pmplock_recorded
+ * status may be updated again when accessing pmpcfg.
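+ * (Per the Smepmp proposal, MML and MMWP are sticky and cannot be cleared
+ * once set, and attempts to set RLB are ignored while locked entries exist;
+ * the expected_val adjustments below encode exactly that behaviour.)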
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr13 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (13 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr13, %1 \n"
+ "\tcsrr %0, pmpaddr13 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr13 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 so that no index other than 2 is changed
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * PMP_L needs to be set for cfg0, otherwise the next PC fetch will be
+ * illegal. This is a small coverage hole for non-PMP_L + mml, which should
+ * be a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (0 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked
+ * pmp_lock means cfg2/3 are locked
+ * sec_mml is the test coverage hole mentioned above
+ */
+ if ((1 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c
new file mode 100644
index 00000000..15475f10
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
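+ * - The sec_NN variants share this skeleton; only the mseccfg value written
+ *   in the test target and the expected result flags differ between them.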
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
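+ * (With M_MODE_RWX generated as 0, M-mode code and data are covered by the
+ * TOR entries 4..6 programmed below rather than by a single NAPOT entry 0.)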
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..26ecaa03 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
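+ * The csrw/csrr pairs below form the common probe pattern of these tests:
+ * write a candidate value, read it back, and compare against the
+ * WARL-adjusted expectation rather than against the raw written value.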
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..87211001 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
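+ * - mseccfg is accessed by its raw CSR number 0x747 throughout, presumably
+ *   so that assemblers without Smepmp support can still build the test.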
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
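+ * (Given the macros above, TEST_MEM is the range [0x200000, 0x240000) and
+ * U_MEM is the range [0x240000, 0x250000).)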
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..cf78bf42 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
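+ * In this variant the mseccfg write below also tries to set RLB; since
+ * locked entries already exist, that set is expected to be ignored and RLB
+ * to read back as 0.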
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..b95dfad4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
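+ * - On failure, checkTestResult() exits with a bitmask: 1 for a seccfg
+ *   mismatch, 2 for pmpaddr, 4 for pmpcfg; exit code 0 means the test passed.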
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
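+ * ((TEST_MEM_START >> 3) - 1 in the M_MODE_RWX branch below is, assuming the
+ * usual NAPOT decoding, the encoding of the 2 MiB region [0, TEST_MEM_START).)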
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..e3969778 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
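+ * This variant writes all three mseccfg bits; MML and MMWP are expected to
+ * stick, while RLB should still read back as 0 because locked entries exist.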
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..29d55dc0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
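+ * - Unlike the sec_NN variants, this pmp_NN variant exercises the
+ *   pmpaddr/pmpcfg write path and expects the pmpcfg read-back to differ
+ *   (expected_pmpcfg_fail = 1 below).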
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
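+ * (For reference: with TOR, entry i matches addresses in the range
+ * pmpaddr[i-1] << 2 <= addr < pmpaddr[i] << 2, which is why every bound
+ * below is written as a byte address shifted right by 2.)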
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..5329e8b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
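+ * - Note: each pmpNcfg field is one byte of a pmpcfg CSR, so on RV64
+ *   pmpcfg0 holds pmp0cfg..pmp7cfg and the odd-numbered pmpcfg CSRs
+ *   (pmpcfg1, pmpcfg3, ...) do not exist.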
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
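+ * (Below, the pmpaddr2 write is expected to take effect: RLB is set and
+ * no entry is locked, so the readback should equal the written value.)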
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..d3a90ed6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
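+ * (Illustrative: in the M_MODE_RWX variant below, pmpaddr0 is written as
+ * (TEST_MEM_START >> 3) - 1 = 0x3ffff, a NAPOT value that covers exactly
+ * [0, TEST_MEM_START).)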
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..3a875e71 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
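+ * (The pmpcfg check below XORs low bits of one cfg byte and reads the
+ * result back; any mismatch means that entry was not writable.)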
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..825b1471 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
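+ * (0x747 in the csrw/csrs lines is the CSR address of mseccfg; it is
+ * presumably written by number because the assembler may not know the
+ * name.)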
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..12ad98d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
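+ * (checkTestResult() later encodes any mismatch as a bitmask exit code:
+ * bit 0 for seccfg, bit 1 for pmpaddr, bit 2 for pmpcfg; 0 means pass.)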
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..e0f4e4e5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
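+ * (pmpaddr1/pmpaddr2 form the TOR pair for TEST_MEM, so a single entry,
+ * pmp2cfg, grants both fetch and load/store permissions on that range.)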
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..65a01f1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently it simply exits via tohost_exit() on any unexpected trap
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB so the entries are not locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM) and pmp1cfg as the base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg accesses, since the pmplock_recorded status
+ * may be updated again when accessing pmpcfg.
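+ * Note: writing a pmpcfg entry with PMP_L set records that entry as locked,
+ * and a later mseccfg write is then judged against the recorded lock state,
+ * so keeping the two writes apart keeps the expected results independent.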
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..28fade41 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
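+ * - Each generated variant pokes a single pmpaddr index plus one pmpcfg
+ *   byte and compares the read-back value against the written one.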
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
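+ * Note: PMP_SHIFT is 2, i.e. a pmpaddr CSR holds a physical address divided
+ * by 4, which is why every region bound below is written shifted right by 2.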
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..e85524c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
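+ * Note: MSECCFG_RLB (rule lock bypass) stays set here, so per Smepmp even
+ * entries with PMP_L set remain writable while the test runs.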
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..3f21aa8f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
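+ * - checkTestResult() reports mismatches as a bitmask exit code:
+ *   1 = seccfg, 2 = pmpaddr, 4 = pmpcfg.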
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
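+ * Note: a TOR entry matches addresses from the previous entry's pmpaddr up
+ * to its own, hence pmpaddr1 = TEST_MEM_START serves as the TEST_MEM base.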
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..5c5b9e13 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
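+ * Note: mseccfg may not be known to the assembler by name, so it is
+ * accessed below through its raw CSR number 0x747.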
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..d3becdd0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
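+ * - The lock00/rlb1/mmwp0/mml0 part of the file name appears to encode the
+ *   lock state and mseccfg bits this variant exercises.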
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
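+ * Note: in the M_MODE_RWX branch, (TEST_MEM_START >> 3) - 1 is the NAPOT
+ * encoding of the naturally aligned region [0, TEST_MEM_START).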
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..46d1e698 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
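+ * Note: the pmpcfg test below uses read-xor-write on a single 8-bit entry
+ * config, so the other entries packed in the same CSR stay untouched.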
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..5f54b2e5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
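+ * - sec_* variants exercise the mseccfg write path itself; the pmp CSR
+ *   branch below is compiled out with #if 0.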
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
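+ * Note: on rv64, pmpcfg0 packs eight 8-bit entry configs, so cfg1 is merged
+ * into cfg0 with a 32-bit shift instead of writing pmpcfg1.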
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..4450d702 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
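+ * (the generated body below just reports any trap via tohost_exit(1337));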
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
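+ *
+ * Both the pmp CSRs and mseccfg are WARL: a write that is refused
+ * (e.g. to a locked entry) is silently adjusted or ignored rather than
+ * trapping, so each check below writes a value and compares the read-back.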
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..d63b1380 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
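+ * - mseccfg lives at CSR address 0x747 (Smepmp); it is accessed by
+ *   number below since older assemblers may not know the name.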
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
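+ *
+ * With A=TOR, entry i matches pmpaddr[i-1] << 2 <= addr < pmpaddr[i] << 2,
+ * so pmpaddr1/pmpaddr2 bound TEST_MEM and pmpaddr3 tops it off with U_MEM.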
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..8169d4c1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
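+ *
+ * Per Smepmp, MML and MMWP are sticky (once set they stay set until
+ * reset) while RLB is clearable, so the write below that omits RLB is
+ * expected to clear the RLB bit set at the start and read back MML|MMWP.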
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..8e8825c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
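+ * - mseccfg bits, per Smepmp: MML = machine-mode lockdown, MMWP =
+ *   machine-mode whitelist policy (M-mode accesses matching no PMP
+ *   rule are denied), RLB = rule-lock bypass (locked rules stay
+ *   editable while it is set).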
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
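+ *
+ * The M_MODE_RWX build uses NAPOT: pmpaddr0 = (TEST_MEM_START >> 3) - 1
+ * = 0x3ffff has all low bits set, encoding the power-of-two region
+ * [0, 0x200000), i.e. everything below TEST_MEM_START.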
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..d67039c9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
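+ *
+ * Each generated variant compiles exactly one branch of the #if below:
+ * either the pmpaddr/pmpcfg write-and-read-back checks or the mseccfg
+ * write pattern under test.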
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..34783232 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
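+ * - The file name encodes the generator knobs: lock00 (no extra lock
+ *   bits), rlb1/mmwp0/mml0 (the mseccfg state set up beforehand), and
+ *   sec_NN, where NN is the mseccfg value written in the test target.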
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
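+ *
+ * With M_MODE_RWX=0, execute and read/write permissions are split over
+ * TOR entries 5 and 6 below; a single shared M-mode RWX rule cannot be
+ * used once MML is set (see the M_MODE_RWX remark above).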
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..39cee672 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
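+ *
+ * (In the pmpaddr read-back branch, compiled out in this variant, the
+ * written value derives from the old one: an all-ones NAPOT-style mask
+ * for entry 0, otherwise the old value shifted left plus 65536, still a
+ * legal address but intentionally different.)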
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr8 \n"
+ : "=r"(rval));
+ // give a value that is valid for both NAPOT and TOR
+ if (8 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr8, %1 \n"
+ "\tcsrr %0, pmpaddr8 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr8 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing any idx other than 2
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+ * This is a little coverage hole for non-PMP_L + mml, which should be
+ * a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((0 || 0 || 1)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c
new file mode 100644
index 00000000..411cc559
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access when seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index (e.g. pmpcfg1 on rv64) is not covered.
+ * - Executed on RV64 only.
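+ * - mseccfg has no symbolic name in the asm below; it is addressed by its
+ *   CSR number 0x747 throughout, presumably so the test also builds with
+ *   toolchains that do not know the Smepmp CSR names yet.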
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently it simply reports failure via tohost_exit() rather than
+ * skipping to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid entries being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
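+ * (Layout sketch: pmpaddr CSRs hold address >> PMP_SHIFT, and a TOR entry
+ * N matches addresses in [pmpaddrN-1, pmpaddrN). So entry 2 covers
+ * [TEST_MEM_START, TEST_MEM_END) and entry 3 covers
+ * [TEST_MEM_END, U_MEM_END).)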
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg access since the pmplock_recorded status
+ * may be updated again when accessing pmpcfg.
+ */
+ reg_t wval = 0, rval;
+#if 1
+ asm volatile ("csrr %0, pmpaddr3 \n"
+ : "=r"(rval));
+ // give a value that is valid for both NAPOT and TOR
+ if (3 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 0;
+ }
+ asm volatile ("csrw pmpaddr3, %1 \n"
+ "\tcsrr %0, pmpaddr3 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing any idx other than 3
+ asm volatile ("csrr %0, pmpcfg0 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg0, %1 \n"
+ "\tcsrr %0, pmpcfg0 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+ * This is a little coverage hole for non-PMP_L + mml, which should be
+ * a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ?
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..507f56cb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
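+ * With MSECCFG_RLB set, the CSR writes below are expected to take full
+ * effect even on otherwise locked entries, so each read-back must equal
+ * the written value; a mismatch is recorded in actual_pmpaddr_fail or
+ * actual_pmpcfg_fail.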
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..57f0db54 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
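+ * - The exit code is a bitmask of observed mismatches: bit 0 for seccfg,
+ *   bit 1 for pmpaddr, bit 2 for pmpcfg (see checkTestResult()).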
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..22805ab4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
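+ * (The pmpcfg check below XOR-flips bits only within the cfg byte of the
+ * entry under test, so all other pmpNcfg fields keep the values programmed
+ * above.)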
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..1bb4c3bb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..c6c6022e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
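+ * (All expected_*_fail counters are zero in this variant: with RLB set,
+ * nothing should be silently ignored, so the test passes only when every
+ * read-back equals what was written.)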
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..6206f2b5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..872cf9c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
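+ * With MSECCFG_RLB (Rule Locking Bypass) set above, pmpcfg/pmpaddr writes
+ * are expected to take effect even for locked entries, so each read-back
+ * below should return exactly the value written; all expected_*_fail
+ * constants in this variant are 0.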
+ */
+ reg_t wval = 0, rval;
+#if 1
+ asm volatile ("csrr %0, pmpaddr10 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (10 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr10, %1 \n"
+ "\tcsrr %0, pmpaddr10 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr10 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing idx other than 3
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next instruction fetch will be
+ * illegal. This is a little coverage hole for non-PMP_L + mml, which should
+ * be a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((1 || 0 || 1)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c
new file mode 100644
index 00000000..e3dd7e1f
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
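+ * - The mseccfg CSR is accessed by number (0x747) throughout, presumably
+ *   because the assembler in use does not know it by name.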
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override handle_trap() from syscalls.c.
+ * Currently it just reports failure and exits instead of skipping to the
+ * next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid entries being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share the same PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
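+ *
+ * (TOR background, from the privileged spec: a PMP_TOR entry i matches
+ * pmpaddr[i-1] <= addr>>2 < pmpaddr[i], so entry 2 spans
+ * TEST_MEM_START..TEST_MEM_END and entry 3 spans TEST_MEM_END..U_MEM_END
+ * once their A fields are set to PMP_TOR below.)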
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..f2e81a1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
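+ * On RV64 each implemented (even-numbered) pmpcfg CSR packs eight 8-bit
+ * entry configs, so pmpcfg2 covers pmp entries 8..15, and the XOR below of
+ * a constant shifted by (idx * 8) flips bits in exactly one entry's config
+ * byte.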
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..d2f38a7f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
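+ *
+ * (Note: rv64 has no odd-numbered pmpcfg CSRs, which is why cfg1, built
+ * below, is folded into the upper 32 bits of pmpcfg0 when
+ * __riscv_xlen == 64 rather than written to pmpcfg1.)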
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..320bdccf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
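+ * About the write value: a NAPOT pmpaddr with t trailing 1 bits encodes a
+ * 2^(t+3)-byte region, and ((rval + 1) << 1) - 1 produces another valid
+ * pattern with one more trailing 1; for TOR any in-range value works, and
+ * the generator picks (rval << 1) + 65536 here.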
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..4dcc75ea --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
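+ *
+ * (With MSECCFG_MML set later in this function, M-mode code may only
+ * execute from entries with PMP_L set, which is why the L bits are OR'd
+ * into the M-mode code and data entries of cfg1 below.)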
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..71bbe73a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
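+ * Failures are only recorded in the actual_* flags here; checkTestResult()
+ * encodes the three expected-vs-actual mismatches as bits 0..2 of the exit
+ * code (0 means pass), and any unexpected trap ends the run via
+ * handle_trap() -> tohost_exit(1337).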
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..c23dd2d9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
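+     *
+     * Sketch of the resulting TOR layout, derived from the constants above
+     * (TEST_MEM_START = 0x200000, TEST_MEM_END = 0x240000, U_MEM_END = 0x250000):
+     *
+     *   entry  pmpaddr value         region covered (byte addresses)
+     *   1      TEST_MEM_START >> 2   TOR base only, no permissions
+     *   2      TEST_MEM_END >> 2     [0x200000, 0x240000)  TEST_MEM
+     *   3      U_MEM_END >> 2        [0x240000, 0x250000)  U_MEM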
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..1e2be0a8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
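+     *
+     * The raw CSR number 0x747 used below is mseccfg; its low bits match the
+     * MSECCFG_* macros defined above. A minimal read sketch (assuming M mode,
+     * where the access is legal):
+     *
+     *   reg_t sec;
+     *   asm volatile ("csrr %0, 0x747" : "=r"(sec));
+     *   int mml  = sec & MSECCFG_MML;  // bit 0
+     *   int mmwp = sec & MSECCFG_MMWP; // bit 1
+     *   int rlb  = sec & MSECCFG_RLB;  // bit 2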
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..4b701ff5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
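+     *
+     * Each pmpNcfg is an 8-bit field packed byte-wise into pmpcfg0 (entries
+     * 0..7 on RV64), so the shifts below select individual entries. A sketch,
+     * assuming the packing from the privileged spec:
+     *
+     *   // 8-bit config of a given entry out of the packed pmpcfg0 value
+     *   reg_t cfg_of(reg_t pmpcfg0_val, int entry) {
+     *       return (pmpcfg0_val >> (entry * 8)) & 0xff;
+     *   }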
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..66245667 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
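+     *
+     * The checks below rely on a write-then-read-back idiom: PMP CSR fields
+     * are WARL, so a locked or otherwise blocked write is silently dropped
+     * rather than trapping. The pattern, as it appears in this function:
+     *
+     *   asm volatile ("csrw pmpcfg2, %1 \n\tcsrr %0, pmpcfg2"
+     *                 : "=r"(rval) : "r"(wval) : "memory");
+     *   // wval == rval  <=>  the write took effect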
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..dd24a817 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
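+     *
+     * PMP_L (0x80) both locks an entry against further writes and, with MML
+     * clear, makes the entry apply to M mode as well. Testing whether an
+     * entry is locked is a one-liner (sketch):
+     *
+     *   int locked = ((cfg0 >> (entry * 8)) & PMP_L) != 0;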
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..0b2c3371 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
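+     *
+     * MSECCFG_RLB (rule-locking bypass) is what lets these tests rewrite
+     * entries carrying PMP_L: while RLB is set, locked entries stay writable.
+     * Per the Smepmp spec (stated here as an assumption, not checked by this
+     * file), RLB itself cannot be turned on once a locked rule exists while
+     * RLB is clear:
+     *
+     *   asm volatile ("csrs 0x747, %0" :: "r"(MSECCFG_RLB)); // may be ignored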
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..c1bfabc1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
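+     *
+     * Note the ordering used below: address CSRs are staged first, and the
+     * new entries are only enabled by the single pmpcfg0 write, so no
+     * half-configured region is ever active (an informal rule of thumb, not
+     * something the generator enforces):
+     *
+     *   csrw pmpaddrN, ... // 1. stage addresses
+     *   csrw pmpcfg0, cfg  // 2. then switch the rules on
+     *   csrw 0x747, sec    // 3. finally flip mseccfg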
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..3d83babf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * Currently any trap simply aborts via tohost_exit() instead of skipping to
+ * the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+  tohost_exit(1337);
+}
+
+__attribute ((noinline))
+void target_foo() {
+  asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+  1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+  asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+  1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+  /*
+   * set MSECCFG_RLB to avoid being locked at start
+   */
+  asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+  asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+  /*
+   * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+   * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+   * Also use pmp3cfg for fixed U mode (U_MEM).
+   */
+  asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+  asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+  asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+  asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+  reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+  asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+  asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+  asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+  reg_t cfg0 = PMP_OFF;
+  reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+  if (1) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+    cfg0 |= PMP_L;
+#else
+    cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+  }
+
+  reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+  cfg0 |= sub_cfg << 24; // for U_MEM
+  cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+  cfg0 |= (cfg1 << 32);
+#else
+  asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+  asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+  if (0 != 0) {
+    reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+    if (0) {
+      asm volatile ("csrs pmpcfg0, %0 \n"
+            :
+            : "r"(lock_bits)
+            : "memory");
+    } else {
+      asm volatile ("csrc pmpcfg0, %0 \n"
+            :
+            : "r"(lock_bits)
+            : "memory");
+    }
+  }
+
+  // set proc->state.mseccfg
+  const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+  asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+  asm volatile ("nop");
+  /*
+   * Need to separate pmp and seccfg accesses, since the pmplock_recorded
+   * status may be updated again when accessing pmpcfg.
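+   *
+   * Per the Smepmp proposal (summarized here as background): mseccfg.MML and
+   * mseccfg.MMWP are sticky and can only be set, never cleared, by software,
+   * while an attempt to set mseccfg.RLB is ignored whenever RLB is 0 and any
+   * pmpcfg entry already has PMP_L set. The expected_val adjustments after
+   * the 0x747 write-back below appear to encode these rules, roughly:
+   *
+   *   if (locked_entry_exists && !rlb_already_set)
+   *     expected_val &= ~MSECCFG_RLB;  // the RLB write is dropped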
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..563d15b0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
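+ *
+ * The file name apparently encodes the generator parameters: lock00 (no
+ * pre-locked pmpcfg entries), rlb1 (mseccfg.RLB set), mmwp1 (mseccfg.MMWP
+ * set), mml0 (mseccfg.MML clear) and pmp_01 (which pmpaddr/pmpcfg write
+ * variant is exercised), matching the constants folded into set_cfg() below.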
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
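+ *
+ * Encoding reminder (RISC-V privileged spec): a pmpaddrN CSR holds the
+ * physical address shifted right by 2, hence the ">> 2" in the writes below,
+ * and a TOR entry i matches pmpaddr[i-1]*4 <= addr < pmpaddr[i]*4. Each
+ * pmpcfgN register packs one 8-bit config per entry (eight per register on
+ * RV64, which is why cfg1 can later be folded in as cfg0 |= cfg1 << 32).
+ * For example, with TEST_MEM_START = 0x200000 and TEST_MEM_END = 0x240000:
+ *
+ *   csrw pmpaddr1, 0x200000 >> 2  // = 0x80000
+ *   csrw pmpaddr2, 0x240000 >> 2  // entry 2 (TOR) covers [0x200000, 0x240000)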
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..dd3aa46a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
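+ *
+ * The pmpaddr write-back check below derives a new value that stays legal
+ * for either address-matching mode: for a NAPOT entry, ((rval + 1) << 1) - 1
+ * extends the trailing-ones mask by one bit (doubling the region), while for
+ * TOR/OFF a plain (rval << 1) is just another boundary. A read-back mismatch
+ * therefore signals that the write was ignored (e.g. a locked entry) rather
+ * than a malformed value.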
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..9843b909 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..8bf13de8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
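+ *
+ * Note: 0x747 is the CSR number of mseccfg (0x757 would be mseccfgh on
+ * RV32); the raw number is presumably used because the vendored encoding.h
+ * predates a CSR_MSECCFG define. Writing it and reading it back makes any
+ * silently-dropped bits visible as a mismatch, which is how the seccfg
+ * variants of these tests check it.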
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..2c685349 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..c0ab076a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
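+ *
+ * The pmpcfg check below XORs a small constant into entry 2's config byte:
+ * (reg_t)6 << (2 * 8) toggles its PMP_W and PMP_X bits. If the entry were
+ * locked (PMP_L set while mseccfg.RLB is clear), the write would be ignored
+ * and the read-back comparison would catch it.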
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..a864200b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
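+ *
+ * Result reporting, common to all these generated tests: checkTestResult()
+ * folds the expected-vs-actual flags into an exit-code bitmask (bit 0 for
+ * seccfg, bit 1 for pmpaddr, bit 2 for pmpcfg), so exit(0) means the observed
+ * CSR behaviour matched the generator's expectations.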
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..f7d6f4f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
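+ *
+ * The "valid for both NAPOT and TOR" value computed below leans on the
+ * NAPOT encoding; a minimal sketch, assuming a helper name that is not
+ * part of the generated source:
+ */
+#if 0 /* illustrative only, never compiled */
+static inline reg_t to_pmpaddr_napot(reg_t base, reg_t size) {
+    /* size must be a power of two >= 8; the trailing one bits encode it */
+    return (base >> 2) | ((size >> 3) - 1);
+}
+#endif
+/*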
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..8bfc7600 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
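+ * - The name suffix encodes the generator parameters (inferred from the
+ *   code below): lock00 = no PMP lock bits set, rlb1/mmwp1/mml0 = the
+ *   mseccfg value programmed during setup, pmp_NN = the generated CSR
+ *   test case.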
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
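+ *
+ * Note: the raw CSR number 0x747 used with csrs/csrw in this function is
+ * mseccfg; the generated code uses the numeric form (presumably because
+ * the assembler available at generation time did not know the CSR by
+ * name).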
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..c2ee48f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
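+ *
+ * Every CSR check below follows the same write-then-readback shape; as a
+ * sketch it could be factored into a macro like this one (the name is an
+ * assumption, not part of the generated source):
+ */
+#if 0 /* illustrative only, never compiled */
+#define CSR_WRITE_READBACK(csr, wval, rval) \
+    asm volatile ("csrw " #csr ", %1 \n\tcsrr %0, " #csr \
+                  : "=r"(rval) : "r"(wval) : "memory")
+#endif
+/*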
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..ac59085a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..d976ff06 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
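+ *
+ * The pmpcfg check below XOR-toggles only the cfg byte of the entry under
+ * test within the wide pmpcfg CSR, so neighbouring entries keep their
+ * values and the readback comparison isolates a single cfg field.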
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..84b826cd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..d3a333c4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..656fd426 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
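+ * - mseccfg (CSR 0x747) is referenced by number, presumably because
+ *   older assemblers lack the Smepmp CSR names. Its low bits match the
+ *   macros below: MML = bit 0, MMWP = bit 1, RLB = bit 2. A minimal
+ *   read sketch: reg_t v; asm volatile ("csrr %0, 0x747" : "=r"(v));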
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
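+ *
+ * pmpaddrN holds the physical address shifted right by 2, hence the
+ * ">> 2" on each write below; e.g. TEST_MEM_END 0x240000 is written as
+ * 0x90000. A TOR rule at entry i covers pmpaddr[i-1] <= addr < pmpaddr[i]
+ * (in pmpaddr units), so the OFF entry at pmpaddr1 exists only to seed
+ * the lower bound of the pmp2cfg TOR rule.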
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..ef870350 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
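+ *
+ * On RV64 only the even-numbered pmpcfg registers exist, each packing
+ * eight 8-bit entry configs; that is why the !M_MODE_RWX path below
+ * folds cfg1 into the upper half of pmpcfg0 (cfg0 |= cfg1 << 32) and
+ * the separate pmpcfg1 write is kept only as the RV32 fallback.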
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..8c3380e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
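+ * - The exit status is a bitmask, not a boolean: checkTestResult()
+ *   reports bit 0 for a seccfg mismatch, bit 1 for pmpaddr and bit 2
+ *   for pmpcfg, so a single exit code tells the host which probe
+ *   diverged (0 means all passed).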
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
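+ *
+ * Ordering note: set_cfg() raises MSECCFG_RLB before any config that
+ * could carry PMP_L because, to my reading of Smepmp, RLB is sticky and
+ * can no longer be set once a locked rule exists. The set-bits form
+ * used above is simply:
+ *   asm volatile ("csrs 0x747, %0" :: "r"(MSECCFG_RLB));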
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..7289041c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
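+ *
+ * Each generated file appears to compile exactly one branch of the
+ * "#if" below: pmp_NN variants keep the pmpaddr/pmpcfg probe, while
+ * sec_NN variants such as this one keep only the mseccfg probe and
+ * leave the other branch preprocessed away.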
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..de5d5576 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
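+ * - M_MODE_RWX chooses between one shared NAPOT RWX entry for M mode
+ *   and the split TOR layout used here. It must stay 0 when MML will be
+ *   set: as I understand the Smepmp MML rules, an unlocked RWX rule no
+ *   longer grants M-mode execute permission, so the next fetch would
+ *   trap.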
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
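+ *
+ * The entry config bytes are placed by shifting: sub_cfg is an 8-bit
+ * TOR R|W|X byte landing at bits 16..23 for entry 2 (TEST_MEM) and
+ * bits 24..31 for entry 3 (U_MEM), i.e. the general pattern is
+ *   cfg0 |= sub_cfg << (entry * 8);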
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..69f4a64d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
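+ *
+ * The mseccfg check below follows the usual WARL pattern: the written
+ * value is masked to the three defined bits to form expected_val, then
+ * adjusted for cases where hardware may legally drop a bit; for this
+ * parameter set the generator resolves the RLB-clearing clause to dead
+ * code ("&& 1 == 0").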
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..45802766 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
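+ * - printf() is only declared when PRINTF_SUPPORTED is defined by the
+ *   accompanying syscalls.c build; otherwise the macro stubs it out, so
+ *   the bare-metal image carries no libc dependency.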
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((0 || 0 || 0)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c
new file mode 100644
index 00000000..cb0a4693
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently it simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share the same PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg access since pmplock_recorded status may be
+ * updated again when accessing pmpcfg.
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr7 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (7 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr7, %1 \n"
+ "\tcsrr %0, pmpaddr7 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr7 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing idx other than 2
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * need to set PMP_L for cfg0, otherwise the next PC will be illegal
+ * This is a little coverage hole for non-PMP_L + mml, which should be
+ * a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((0 || 0 || 1)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c
new file mode 100644
index 00000000..3bc4e6f0
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..d5d1116b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..bce4fcf7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..b85dc3a9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..b312c82d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..39489ad1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..8346afc9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
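+ *
+ * With TOR matching, entry i covers [pmpaddr(i-1) << 2, pmpaddr(i) << 2),
+ * so the three writes below lay out (values from the local macros):
+ *     pmp1: TOR base            = 0x200000
+ *     pmp2: TEST_MEM [0x200000, 0x240000)
+ *     pmp3: U_MEM    [0x240000, 0x250000)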
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..210759b3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
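+ *
+ * The probe below derives a write value that stays legal whatever the
+ * entry's matching mode: entry 0 (NAPOT after reset) would get an
+ * all-ones-style mask, every other entry just gets the old value shifted
+ * plus an offset. For reference, a NAPOT encoding of a 2^k-byte region at
+ * aligned base B is (assuming k >= 3):
+ *     pmpaddr = (B >> 2) | ((1UL << (k - 3)) - 1)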
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..ab1fc641 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
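+ * - mseccfg is accessed by number (CSR 0x747) throughout; its low bits
+ *   are MML (0x1), MMWP (0x2) and RLB (0x4), matching the macros below.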
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
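+ *
+ * pmpcfg0 packs one 8-bit field per entry, so the shifts below select a
+ * byte lane:
+ *     cfg0 |= sub_cfg << 16; // byte 2 -> pmp2cfg (TEST_MEM)
+ *     cfg0 |= sub_cfg << 24; // byte 3 -> pmp3cfg (U_MEM)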
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..42b50320 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
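+ *
+ * Every expected_*_fail flag is 0 in this rlb1_mmwp1_mml1 variant: with
+ * MSECCFG_RLB set the PMP lock rule is bypassed, so each CSR write below
+ * is expected to take effect and read back unchanged.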
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..a821b075 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
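+ * - The file name encodes the generator knobs: lock00 (no PMP_L on the
+ *   entries under test) and rlb1/mmwp1/mml1 (the mseccfg bits written
+ *   before the probe).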
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
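+ *
+ * On RV64 the pmp4..pmp7 config bytes occupy the upper half of pmpcfg0,
+ * hence the "cfg0 |= (cfg1 << 32)" merge below instead of a separate
+ * pmpcfg1 write (pmpcfg1 only exists on RV32).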
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..857ef934 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
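+ *
+ * The pmpcfg probe below XOR-flips only byte 3 of pmpcfg2 (the pmp11cfg
+ * field), leaving the other seven entries held in that CSR untouched,
+ * then checks the value read back.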
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..2d366726 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
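+ * - checkTestResult() reports mismatches as a bitmask exit code: bit 0
+ *   for seccfg, bit 1 for pmpaddr, bit 2 for pmpcfg; 0 means pass.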
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
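+ *
+ * pmpaddr CSRs hold addresses shifted right by PMP_SHIFT (2), which is
+ * why every write below passes byte_address >> 2.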
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..5bcd2d21 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
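+ * Note: the entries exercised below (pmpaddr15 and the index-2 byte of
+ * pmpcfg2) are never locked by this test, and MSECCFG_RLB is set besides,
+ * so each write/read-back pair is expected to match; accordingly all the
+ * expected_*_fail constants in this file are 0.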
+ */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr15 \n"
+                  : "=r"(rval));
+    // pick a value that stays valid for both NAPOT and TOR
+    if (15 == 0) {
+        wval = ((rval + 1) << 1) - 1;  // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr15, %1 \n"
+                  "\tcsrr %0, pmpaddr15 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr15 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Refresh cfg0 from pmpcfg2 so the write below changes only index 2
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * PMP_L needs to be set for cfg0, otherwise the next instruction fetch
+     * becomes illegal. This leaves a small coverage hole for non-PMP_L + mml,
+     * which is a restricted use case and acceptable here.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole mentioned above
+     */
+    if ((1 || 0 || 1)
+        && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0;  // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c
new file mode 100644
index 00000000..da2a73dd
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only. 
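+ * - checkTestResult() folds mismatches into the exit code as bit flags:
+ *   bit 0 for seccfg, bit 1 for pmpaddr, bit 2 for pmpcfg.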
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
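+ * The pmpaddr CSRs hold a physical address right-shifted by PMP_SHIFT
+ * (2 bits, i.e. the 4-byte granule of the privileged spec), which is why
+ * every address below is written as (addr >> 2).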
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..edad03bf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
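+ * In this generated variant the pmpaddr/pmpcfg arm below is compiled out
+ * (#if 0); only the mseccfg write/read-back path is exercised.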
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..7c46885b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
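+ * - mseccfg is addressed numerically as CSR 0x747 (its Smepmp-assigned
+ *   address) throughout, so no assembler support for the name is needed.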
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
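+ * With TOR matching, entry i covers [pmpaddr(i-1), pmpaddr(i)): pmp4cfg
+ * stays OFF and only donates the 0x80000000 base, pmp5cfg grants X over
+ * the code region up to 0x80010000, and pmp6cfg grants R/W over data up
+ * to TEST_MEM_START.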
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..5b53aa93 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
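+ * Note: clearing mseccfg.RLB always takes effect (per Smepmp only setting
+ * it can be refused), so the write below, which leaves RLB at 0, is
+ * expected to drop it while MML and MMWP stay set; the expected_val
+ * computation afterwards mirrors exactly that.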
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..bd359d1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
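+ * - handle_trap() below is overridden to call tohost_exit(1337), so an
+ *   unexpected access fault ends the run with a distinctive exit code.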
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
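+ * M_MODE_RWX is 0 here, so M-mode code and data get separate TOR entries
+ * (pmp4cfg..pmp6cfg) instead of one shared NAPOT R/W/X entry on pmp0cfg;
+ * per the remark at M_MODE_RWX above, the shared layout would fault once
+ * mseccfg.MML is set.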
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..99b66de7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
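+ * Per Smepmp, mseccfg.MMWP is sticky (set-only until reset): the write
+ * below drives it to 0, yet the read-back is still expected to have it
+ * set, which is why expected_val ORs MMWP back in afterwards.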
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..138b957d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
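+ * - Comment tokens of the form @name@ (e.g. @set_rlb_at_start:int@) that
+ *   survive below are evidently substitution parameters left over from
+ *   the test_pmp_csr_1.cc_skel skeleton.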
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
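+ * On RV64 only even-numbered pmpcfg CSRs exist, and pmpcfg0 packs
+ * pmp0cfg..pmp7cfg one byte each; that is why cfg1 is folded in as
+ * (cfg1 << 32) below instead of being written to a separate pmpcfg1.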
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..d418b0f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
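+ * The checks that follow all use one write/read-back pattern: write a CSR,
+ * read it back, and record any mismatch. Where a field is WARL-restricted
+ * or the entry is locked, the read-back differs from what was written.
+ * Sketch of the pattern (names illustrative):
+ *   csrw <csr>, wval ; csrr rval, <csr> ; if (wval != rval) fail = 1;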
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..1a3fe5de --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
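+ * - mseccfg is referenced by CSR number (0x747) throughout, since older
+ *   assemblers may not recognize it by name.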
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
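+ * Packing reminder: on RV64 pmpcfg0 holds eight 8-bit pmpNcfg fields, with
+ * entry i in byte i, so a field is positioned with (cfg << (i * 8)).
+ * Illustrative only:
+ *   cfg0 |= (reg_t)(PMP_R | PMP_W | PMP_TOR) << (2 * 8);  // entry 2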
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..5f3a3917 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
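+ * (no trap is expected in these runs; if one is taken anyway, handle_trap()
+ * below ends the run through tohost_exit() with code 1337)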
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
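+ * With mseccfg.RLB clear, an entry whose PMP_L bit is set has both its
+ * pmpcfg field and the matching pmpaddr register locked: writes are ignored
+ * until reset. The read-back comparisons below rely on that behaviour.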
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..f6f5d9e1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
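+ * For comparison with TOR, a NAPOT entry encodes its size in the low bits:
+ * a power-of-two region of SIZE bytes (SIZE >= 8) at base B is written as
+ *   pmpaddr = (B >> 2) | (SIZE/8 - 1);   // illustrative formula
+ * e.g. 0x40000 bytes at 0x200000 gives 0x80000 | 0x7fff = 0x87fff.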
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..84b144e4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
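+ * mseccfg (CSR 0x747) bits exercised here: RLB (0x4) re-enables edits to
+ * locked entries, MML (0x1) turns on machine-mode lockdown, and MMWP (0x2)
+ * denies M-mode access to addresses with no matching entry. MML and MMWP
+ * are sticky: once set, a CSR write cannot clear them.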
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..6201ad12 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
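+ * TOR entries pair with their predecessor: pmpaddr1 (TEST_MEM_START) is the
+ * floor for entry 2 and pmpaddr2 (TEST_MEM_END) its ceiling, so entry 2
+ * guards [TEST_MEM_START, TEST_MEM_END) and entry 3 extends to U_MEM_END.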
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..7d3406d3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
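+ * Outcome bookkeeping: each expected_*_fail constant is compared with its
+ * actual_*_fail counterpart in checkTestResult(), which exits with a bitmask
+ * (1 = seccfg, 2 = pmpaddr, 4 = pmpcfg mismatch); exit code 0 means pass.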
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr2 \n"
+            : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (2 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr2, %1 \n"
+            "\tcsrr %0, pmpaddr2 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 2
+    asm volatile ("csrr %0, pmpcfg0 \n"
+            : "=r"(cfg0)
+            :
+            : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+            "\tcsrr %0, pmpcfg0 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+            | (1 ? MSECCFG_MML : 0)
+            | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+            "\tcsrr %0, 0x747 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c
new file mode 100644
index 00000000..4771dde3
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
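+ * - 0x747 in the csrw/csrs/csrr sequences below is the CSR address of
+ *   mseccfg, used in place of a named CSR.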
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
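+     * (The pmpaddr CSRs hold physical address bits [.. : 2], which is why
+     * every bound below is written shifted right by 2, i.e. by PMP_SHIFT.)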
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..23806b44 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
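+ *
+ * (The pmpcfg check below flips one cfg byte with XOR, PMP_L included, and
+ * compares the readback against the written value to see whether the write
+ * actually landed.)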
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..0532f351 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
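+ * - The exit code encodes which checks failed: bit 0 for seccfg, bit 1 for
+ *   pmpaddr, bit 2 for pmpcfg (see checkTestResult).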
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
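+ * (In the !M_MODE_RWX path below, cfg1 sets pmp5cfg = PMP_X | PMP_TOR for
+ * the code range and pmp6cfg = PMP_R | PMP_W | PMP_TOR for the data range,
+ * matching the pmpaddr5/pmpaddr6 comments.)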
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..7f20ce63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
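+ *
+ * (On RV64 each even-numbered pmpcfg CSR packs eight 8-bit entries, so
+ * pmp8cfg..pmp15cfg live in pmpcfg2 -- which is why pmpcfg2 is read below
+ * while pmpaddr15 is the entry under test.)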
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..1c1f1824 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
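+ * - Generated variants of this test differ only in which pmpaddr/pmpcfg
+ *   entry is exercised and in the expected_*_fail values.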
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
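+ * (pmpaddr1..pmpaddr3 form two TOR windows: pmp2cfg covers TEST_MEM_START
+ * up to TEST_MEM_END, and pmp3cfg covers from there up to U_MEM_END.)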
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..670a256f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
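+ *
+ * (The + 65536 (0x10000) term in the write value below presumably just
+ * ensures the written value differs from the reset value even when the
+ * shifted value alone would not.)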
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..407e45f2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
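+ * (In the M_MODE_RWX path, (TEST_MEM_START >> 3) - 1 = 0x3ffff is the NAPOT
+ * encoding of the range [0, TEST_MEM_START).)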
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..bf70f329 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");  // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");  // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");  // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) {  // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24;  // for U_MEM
+    cfg0 |= sub_cfg << 16;  // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+            :
+            : "r"(cfg1)
+            : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+            :
+            : "r"(cfg0)
+            : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24);  // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                    :
+                    : "r"(lock_bits)
+                    : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                    :
+                    : "r"(lock_bits)
+                    : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+            | (0 ? MSECCFG_MML : 0)
+            | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg accesses since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr10 \n"
+            : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (10 == 0) {
+        wval = ((rval + 1) << 1) - 1;  // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr10, %1 \n"
+            "\tcsrr %0, pmpaddr10 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpaddr10 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 3
+    asm volatile ("csrr %0, pmpcfg2 \n"
+            : "=r"(cfg0)
+            :
+            : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+            "\tcsrr %0, pmpcfg2 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC will be illegal
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+            | (1 ? MSECCFG_MML : 0)
+            | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+            "\tcsrr %0, 0x747 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c
new file mode 100644
index 00000000..235472e7
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the pmp CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..9a71c8fc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..690210ea --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..93c09f19 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..35f7cfc3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..56624f47 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..088cbbf9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
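+     * (pmpaddrN holds bits [XLEN+1:2] of the address, hence the ">> 2"
+     * below; a TOR entry then matches pmpaddr[i-1] <= addr < pmpaddr[i].)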
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (0 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr15 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (15 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr15, %1 \n"
+        "\tcsrr %0, pmpaddr15 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr15 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 2
+    asm volatile ("csrr %0, pmpcfg2 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+        "\tcsrr %0, pmpcfg2 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (0) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (0 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 0)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c
new file mode 100644
index 00000000..b5b056ce
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
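+ * (By the riscv-tests convention, handle_trap's return value is the PC to
+ * resume at; that convention is assumed here, since syscalls.c itself is not
+ * part of this diff.)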
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (0 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr15 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (15 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr15, %1 \n"
+        "\tcsrr %0, pmpaddr15 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr15 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 2
+    asm volatile ("csrr %0, pmpcfg2 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+        "\tcsrr %0, pmpcfg2 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c
new file mode 100644
index 00000000..640439a8
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
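+     * (On rv64, pmpcfg0 packs pmp0cfg..pmp7cfg one byte each, which is why
+     * cfg0 is assembled below with byte shifts such as "sub_cfg << 16".)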
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr3 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (3 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr3, %1 \n"
+        "\tcsrr %0, pmpaddr3 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 3
+    asm volatile ("csrr %0, pmpcfg0 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+        "\tcsrr %0, pmpcfg0 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c
new file mode 100644
index 00000000..1f81f74b
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
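+     * The XOR in the test body that follows flips selected bits of a single
+     * pmpNcfg byte, so any read-back mismatch isolates that one entry.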
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr2 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (2 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr2, %1 \n"
+        "\tcsrr %0, pmpaddr2 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 2
+    asm volatile ("csrr %0, pmpcfg0 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+        "\tcsrr %0, pmpcfg0 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c
new file mode 100644
index 00000000..6411eef3
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr3 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (3 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr3, %1 \n"
+        "\tcsrr %0, pmpaddr3 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 3
+    asm volatile ("csrr %0, pmpcfg0 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+        "\tcsrr %0, pmpcfg0 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c
new file mode 100644
index 00000000..28406299
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr2 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (2 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr2, %1 \n"
+        "\tcsrr %0, pmpaddr2 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 2
+    asm volatile ("csrr %0, pmpcfg0 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+        "\tcsrr %0, pmpcfg0 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c
new file mode 100644
index 00000000..a3eefe6c
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
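+ * - checkTestResult() encodes mismatches in the exit code: bit 0 for seccfg,
+ *   bit 1 for pmpaddr, bit 2 for pmpcfg; 0 means the test passed.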
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+        :
+        : "r"(cfg1)
+        : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+        :
+        : "r"(cfg0)
+        : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                :
+                : "r"(lock_bits)
+                : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr3 \n"
+        : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (3 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr3, %1 \n"
+        "\tcsrr %0, pmpaddr3 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing idx other than 3
+    asm volatile ("csrr %0, pmpcfg0 \n"
+        : "=r"(cfg0)
+        :
+        : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+        "\tcsrr %0, pmpcfg0 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c
new file mode 100644
index 00000000..0ae5d5fa
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
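+     *
+     * The probe that follows is a plain write/read-back check: read the CSR,
+     * derive a changed but still legal value, write it, read it back, and
+     * record a failure when the two differ (i.e. the write was silently
+     * dropped). checkTestResult() compares each actual_* flag with its
+     * expected_* constant and folds mismatches into the exit code as a
+     * bitmask: 1 = seccfg, 2 = pmpaddr, 4 = pmpcfg; an exit code of 6, for
+     * example, would mean both PMP probes behaved contrary to expectation.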
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..75298acd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
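+ *
+ * The file name appears to encode the generator parameters: rlb0/mmwp0/mml1
+ * match the mseccfg value written in set_cfg() below (MML set, RLB and MMWP
+ * clear), while the pmp_07 suffix selects which CSR/value pair is probed.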
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
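+ *
+ * A sketch of the resulting pmpcfg0 plan with M_MODE_RWX == 0, derived from
+ * the writes below (not normative):
+ *   pmp1cfg = OFF,        pmpaddr1 = TEST_MEM_START (TOR base only)
+ *   pmp2cfg = R|W|X|TOR,  pmpaddr2 = TEST_MEM_END   (TEST_MEM)
+ *   pmp3cfg = R|W|X|TOR,  pmpaddr3 = U_MEM_END      (U_MEM)
+ * sub_cfg also sets PMP_L on pmp2/pmp3; the csrc further below clears it.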
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..6ef040d9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
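+ *
+ * mseccfg is addressed by its raw CSR number 0x747 throughout these tests,
+ * presumably because older assemblers do not know the register by name.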
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..792e5be4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
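+ *
+ * In this variant expected_pmpcfg_fail is 1: the generator expects the
+ * pmpcfg2 write below to be silently dropped under the configured mseccfg
+ * (MML set, RLB clear), so a read-back mismatch is the passing outcome.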
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
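+ *
+ * Note that sub_cfg first sets PMP_L on pmp2/pmp3 and the csrc on pmpcfg0
+ * further below clears it again, so those entries end up unlocked before
+ * the test target runs.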
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..bb799cd4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
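+ *
+ * For the pmpaddr probe the new value is derived from the current one:
+ * index 0 would take the NAPOT-mask form, while every other index shifts
+ * the old value left and, in this variant, adds 0x10000 so that the change
+ * is observable on read-back.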
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..06f3e202 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
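+ *
+ * On RV64 only the even-numbered pmpcfg CSRs exist, so the PMP entries
+ * 8..15 probed by these variants live in pmpcfg2.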
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..3901f359 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
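+ *
+ * The pmpcfg probe XORs a single byte of the register (with PMP_L included
+ * in the toggle value), so only the targeted entry's configuration changes.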
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..6aea5f72 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
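+ *
+ * The RV64-only remark matches the cfg packing in set_cfg(), which folds
+ * cfg1 into the upper half of pmpcfg0 when __riscv_xlen == 64 instead of
+ * writing pmpcfg1 (which does not exist on RV64).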
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..e1ef95f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
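/*
 * Illustrative helpers (not from the generated sources): on RV64 only the
 * even-numbered pmpcfg CSRs exist, and each packs eight 8-bit entry
 * configurations, so pmpcfg2 holds entries 8..15. The XOR in the probe
 * that follows flips only the targeted byte, because the shifted constant
 * has no bits outside it. The names below are hypothetical.
 */
static inline unsigned pmpcfg_entry_get(unsigned long cfgreg, int idx)
{
    return (cfgreg >> (idx * 8)) & 0xff;    /* idx in 0..7 */
}

static inline unsigned long pmpcfg_entry_set(unsigned long cfgreg, int idx,
                                             unsigned cfg)
{
    unsigned long mask = 0xffUL << (idx * 8);
    return (cfgreg & ~mask) | ((unsigned long)cfg << (idx * 8));
}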
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..4a57dd41 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..0799e19d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
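/*
 * A note on why these files are generated once per CSR index rather than
 * looping: csrr/csrw encode the CSR number as an instruction immediate,
 * so it cannot be a runtime variable. A macro can stamp out one access
 * per index instead; a minimal sketch with a hypothetical name:
 */
#define READ_PMPADDR(n, out) \
    asm volatile ("csrr %0, pmpaddr" #n : "=r"(out))

/* usage: reg_t v; READ_PMPADDR(10, v);  -- expands to "csrr %0, pmpaddr10" */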
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..b64999c2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..6fe87896 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
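/*
 * Simplified model (an assumption drawn from the Smepmp rules that spike
 * implements, not code from this repo) of the mseccfg readback checked in
 * the seccfg branch below: MML and MMWP are sticky once set, and an RLB
 * write is ignored while RLB is clear and any PMP entry is locked.
 */
static unsigned long expected_mseccfg(unsigned long old_val,
                                      unsigned long wval,
                                      int any_pmp_entry_locked)
{
    unsigned long v = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
    if (any_pmp_entry_locked && !(old_val & MSECCFG_RLB))
        v &= ~MSECCFG_RLB;                           /* RLB cannot be raised */
    v |= old_val & (MSECCFG_MML | MSECCFG_MMWP);     /* sticky bits persist  */
    return v;
}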
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..9e3cdce7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..3dca07a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
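/*
 * The harness reports results as a bitmask exit code (see checkTestResult
 * below): bit 0 flags a seccfg expected/actual divergence, bit 1 pmpaddr,
 * bit 2 pmpcfg. A hypothetical host-side decoder for triage scripts:
 */
static void decode_exit_code(int code)
{
    if (code == 0) printf("pass\n");
    if (code & 1)  printf("seccfg expectation mismatch\n");
    if (code & 2)  printf("pmpaddr expectation mismatch\n");
    if (code & 4)  printf("pmpcfg expectation mismatch\n");
}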
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..c377ed39 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
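+ * - Bit names, from the RISC-V ePMP (Smepmp) proposal: MML is
+ *   Machine-Mode Lockdown, MMWP is Machine-Mode Whitelist Policy,
+ *   and RLB is Rule-Locking Bypass; mseccfg itself is CSR 0x747.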
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
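+ *
+ * pmpaddrN holds the target address shifted right by 2 (4-byte
+ * granularity), hence the ">> 2" in the writes below; a TOR rule at
+ * entry i covers [pmpaddr(i-1) << 2, pmpaddr(i) << 2).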
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..dead666e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
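+ *
+ * Also note: once MML is set, M-mode may only execute from regions
+ * covered by locked (L=1) rules, which is why the code below first
+ * ORs PMP_L into the code/data rules before rewriting mseccfg.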
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..2419a701 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
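+ * - checkTestResult() exits with a small bitmask: +1 for a seccfg
+ *   mismatch, +2 for pmpaddr, +4 for pmpcfg; exit code 0 means every
+ *   observed result matched its expected_*_fail value.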
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
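+ *
+ * With M_MODE_RWX at 0, pmpaddr4..6 split the 0x80000000 region into
+ * an executable code rule and an RW data rule (both TOR), while
+ * pmpaddr1..3 bound TEST_MEM and the U-mode region above it.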
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..64b4639c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
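+ *
+ * The probes below rely on locked-CSR semantics: with RLB clear,
+ * writes to a locked pmpcfg/pmpaddr entry are silently ignored, and
+ * the tests detect that as a readback value differing from the one
+ * written.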
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..2e9435e3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
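+ * - The file name encodes the generator knobs: lock01_rlb0_mmwp1_mml0
+ *   presumably selects the initial lock state and the RLB/MMWP/MML
+ *   values exercised below.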
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
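+ *
+ * RLB was set on entry (see @set_rlb_at_start@ above) so that any
+ * already-locked entries can still be rewritten during this setup;
+ * the mseccfg write at the end of set_cfg() drops RLB again.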
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..53908ac9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
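+ *
+ * The "cfg0 ^ (bits << (idx * 8))" write below flips bits in just one
+ * pmpNcfg byte, so only the entry under test changes while the rest
+ * of pmpcfg0 is written back unchanged.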
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..b8ccc166 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
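+ * - expected_pmpcfg_fail is 1 here: the generator anticipates that
+ *   the flipped cfg byte will not read back verbatim (e.g. a reserved
+ *   or locked combination), so the mismatch is the passing outcome.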
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
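+ *
+ * Memory map shared by these tests: TEST_MEM spans 0x200000..0x240000,
+ * the U-mode region extends another 0x10000 above it, and FAKE_ADDRESS
+ * lies outside every configured rule (presumably for the fault-probing
+ * variants in this series).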
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr3 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (3 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr3, %1 \n"
+                  "\tcsrr %0, pmpaddr3 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 3
+    asm volatile ("csrr %0, pmpcfg0 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+                  "\tcsrr %0, pmpcfg0 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c
new file mode 100644
index 00000000..3021c532
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
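+
+/*
+ * Note: tohost_exit() is declared noreturn above, so any trap taken while
+ * this test runs terminates it immediately with exit code 1337 rather than
+ * being resumed.
+ */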
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
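+    /*
+     * With mseccfg.RLB clear, writes to a locked PMP entry and to its
+     * associated pmpaddr are ignored without trapping, so the read-back
+     * comparisons below are what detect a "failed" write.
+     */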
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr2 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (2 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr2, %1 \n"
+                  "\tcsrr %0, pmpaddr2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 2
+    asm volatile ("csrr %0, pmpcfg0 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+                  "\tcsrr %0, pmpcfg0 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c
new file mode 100644
index 00000000..b570f859
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
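+
+/*
+ * The generated cases in this series differ only in which pmpaddr/pmpcfg
+ * index is exercised and in the expected_*_fail values set below.
+ */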
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr3 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (3 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr3, %1 \n"
+                  "\tcsrr %0, pmpaddr3 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 3
+    asm volatile ("csrr %0, pmpcfg0 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+                  "\tcsrr %0, pmpcfg0 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c
new file mode 100644
index 00000000..12cf707e
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
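+    /*
+     * The CSR write/read-back pairs below distinguish an ignored write
+     * (locked entry, no exception) from an accepted one; actual_*_fail
+     * records only a read-back mismatch, never a trap.
+     */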
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr12 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (12 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr12, %1 \n"
+                  "\tcsrr %0, pmpaddr12 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr12 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 2
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c
new file mode 100644
index 00000000..fe98ae6e
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
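+    /*
+     * TOR pairing note: a TOR entry uses the preceding pmpaddr as its base,
+     * so pmp2cfg spans pmpaddr1..pmpaddr2 (TEST_MEM) and pmp3cfg spans
+     * pmpaddr2..pmpaddr3 (up to U_MEM_END).
+     */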
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr13 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (13 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr13, %1 \n"
+                  "\tcsrr %0, pmpaddr13 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr13 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 3
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c
new file mode 100644
index 00000000..ac6a65ae
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
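+    /*
+     * RV64 exposes only even-numbered pmpcfg CSRs; pmpcfg2 used below packs
+     * the configuration bytes for PMP entries 8..15, one byte per entry.
+     */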
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr14 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (14 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr14, %1 \n"
+                  "\tcsrr %0, pmpaddr14 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr14 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c
new file mode 100644
index 00000000..7d474786
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr15 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (15 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr15, %1 \n"
+                  "\tcsrr %0, pmpaddr15 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr15 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 2
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 0 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c
new file mode 100644
index 00000000..acf2fc81
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB to avoid being locked at start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share the same PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
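+    /*
+     * 0x747 is the CSR number of mseccfg; the raw number is used throughout
+     * because assemblers without Smepmp support may not recognize the name.
+     */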
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..2774b7a7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c:
+ * currently any trap simply aborts the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
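+ * pmpaddr CSRs hold physical address bits [XLEN+1:2], hence the >> 2
+ * (PMP_SHIFT) on every bound written below; in the M_MODE_RWX branch,
+ * (TEST_MEM_START >> 3) - 1 is the NAPOT encoding of [0, TEST_MEM_START).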
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..2c11c45e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently any trap simply aborts the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Separate the pmp and seccfg accesses, since the recorded pmp lock status
+ * may be updated again when pmpcfg is accessed.
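+ * In the pmpaddr test below, a nonzero entry index gets a TOR-style value
+ * derived by shifting the old value and adding an offset; entry index 0
+ * would instead get a NAPOT-style mask with trailing one bits.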
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..0013abb9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
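+ * - In this sec_* variant the pmpaddr/pmpcfg block is compiled out (#if 0)
+ *   and only the mseccfg (CSR 0x747) write/readback path is exercised.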
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
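+ * A TOR entry i matches addresses in [pmpaddr[i-1] << 2, pmpaddr[i] << 2),
+ * so pmpaddr1 and pmpaddr2 delimit TEST_MEM while pmpaddr3 ends the
+ * U-mode range.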
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..cfad8e07 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently any trap simply aborts the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Separate the pmp and seccfg accesses, since the recorded pmp lock status
+ * may be updated again when pmpcfg is accessed.
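+ * Note that mseccfg.MML and mseccfg.MMWP are sticky (set-only) bits, so
+ * the expected readback below keeps MMWP from the earlier write even when
+ * the test writes it back as 0.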
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..f3007990 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
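+ * - The expected_*_fail constants encode which mismatches this variant
+ *   anticipates; checkTestResult() compares them against the observed
+ *   flags.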
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
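+ * On RV64 only the even-numbered pmpcfg CSRs exist and each packs eight
+ * one-byte entry configs, which is why entry fields are assembled with
+ * byte shifts and cfg1 is folded in as cfg0 |= cfg1 << 32 below.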
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..3e0fce4a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently any trap simply aborts the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Separate the pmp and seccfg accesses, since the recorded pmp lock status
+ * may be updated again when pmpcfg is accessed.
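+ * When mseccfg.RLB reads 0 and any PMP entry is locked, further writes to
+ * RLB are ignored; the expected-value computation below clears
+ * MSECCFG_RLB accordingly.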
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..dcd67a37 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
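+ * - Tokens of the form @name@ in comments (e.g. @set_rlb_at_start:int@)
+ *   are substitution markers from the generator template (.cc_skel).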
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
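+ * With M_MODE_RWX == 0, M-mode code and data get separate TOR entries
+ * (pmpaddr4..pmpaddr6 below) instead of one shared RWX NAPOT entry.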
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..6fffea06 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently any trap simply aborts the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked out at the start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg accesses, since the pmplock_recorded
+ * status may be updated again when accessing pmpcfg.
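+ * (Background, per the Smepmp proposal: while mseccfg.RLB is 0, writes to
+ * pmpcfg entries whose L bit is set are ignored, so the CSR write/read-back
+ * pairs below reveal whether a write actually took effect.)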
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr14 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (14 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr14, %1 \n"
+ "\tcsrr %0, pmpaddr14 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr14 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing any idx other than 3
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * need to set PMP_L for cfg0, otherwise the next PC fetch will fault.
+ * This leaves a small coverage hole for non-PMP_L + MML, which should be
+ * a restricted use case and is acceptable anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((0 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assumed to be entered in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // not reached: checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c
new file mode 100644
index 00000000..b194b9aa
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access when seccfg is introduced.
+ * It is expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
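+ * - The file name appears to encode the generator parameters:
+ *   rlb0_mmwp1_mml0 would select mseccfg.RLB=0, MMWP=1, MML=0 for this variant.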
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
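+ * (pmpaddr CSRs hold physical addresses shifted right by PMP_SHIFT (2); with
+ * A=TOR, entry i matches addresses from pmpaddr[i-1]<<2 up to pmpaddr[i]<<2.)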
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..49715e09 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
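+ * (Note: the skeleton comment below is stale for this variant; handle_trap()
+ * aborts via tohost_exit(1337) rather than skipping the faulting instruction.)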
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
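+ * (CSR number 0x747, used with csrw/csrs/csrc throughout this file, is
+ * mseccfg; the numeric form avoids relying on assembler support for the
+ * Smepmp CSR name.)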
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..913585f5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
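+ * - expected_pmpcfg_fail is 1 in this variant: with MML=1 and RLB=0, the
+ *   pmpcfg write under test should be silently ignored (it would add a rule
+ *   executable in M mode), so the read-back comparison is expected to differ.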
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..060a32e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
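+ * (The test below XORs one byte of pmpcfg0, flipping a few permission bits
+ * plus PMP_L of a single entry; reading the CSR back then shows whether the
+ * update was accepted or silently dropped.)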
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..f01fb95d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
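+ * - Tokens of the form @name@ (e.g. @set_sec_mml@ and @set_rlb_at_start:int@
+ *   below) appear to be substitution markers from the gengen skeleton.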
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..869215fd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
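+ * (Per Smepmp, mseccfg.MML and mseccfg.MMWP are sticky: once set they stay
+ * set until hart reset; the expected_val fixups in the #else branch below
+ * model exactly that behavior.)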
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..ab46071f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..bd985462 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
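+ *
+ * The pmpcfg write below toggles a single entry byte via XOR, e.g. for
+ * entry 2:
+ *   wval = cfg0 ^ ((reg_t)(6 | PMP_L) << (2 * 8));
+ * so the other seven entries packed in pmpcfg0 keep their value and any
+ * read-back mismatch can only come from the entry under test.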
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..aafb4817 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
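+ * - checkTestResult() exits with a bit mask of mismatches against the
+ *   expected_* constants: +1 seccfg, +2 pmpaddr, +4 pmpcfg; 0 means pass.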
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
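+ *
+ * With A = TOR, entry i matches addresses pmpaddr[i-1] <= a < pmpaddr[i],
+ * so pmpaddr1 (TEST_MEM_START) only provides the base of entry 2's range
+ * and pmp1cfg itself stays PMP_OFF.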
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..3fb53d5d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
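+ *
+ * On RV64 only the even-numbered pmpcfg CSRs exist and each packs eight
+ * entry bytes, so pmpcfg2 (accessed below) covers entries 8..15.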
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..4347f200 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
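+ * - mseccfg is accessed by its CSR number 0x747 throughout, presumably
+ *   because the assembler used may not know the CSR by name.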
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
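+ *
+ * In this !M_MODE_RWX layout, cfg1 holds the bytes for entries 4..7 (the
+ * TOR code/data windows starting at 0x80000000); on RV64 it is folded
+ * into the upper half of pmpcfg0 (cfg0 |= cfg1 << 32), while RV32 writes
+ * it to pmpcfg1 instead.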
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..7104c250 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
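+ *
+ * Each CSR write below is immediately read back: when the entry is locked
+ * and mseccfg.RLB is 0 the write is silently dropped, rval keeps the old
+ * value, and the actual_*_fail flags record the mismatch for
+ * checkTestResult() to compare against the expected_* constants.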
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..721ceab4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
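+ * - In this variant expected_pmpcfg_fail is 1, i.e. the generator expects
+ *   the pmpcfg2 write under test to be rejected, presumably by the
+ *   lock/MML rules in force.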
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
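+ *
+ * pmp0cfg..pmp3cfg above denote the per-entry configuration bytes packed
+ * inside the pmpcfg0 CSR, not separate CSRs.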
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..902577d4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
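+ *
+ * For a TOR entry the test writes back (rval << 1) + 65536, which is
+ * still a legal address field; only entry 0 would take the NAPOT branch
+ * ((rval + 1) << 1) - 1, which appends one low 1-bit to the mask and so
+ * doubles the region size.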
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..7b2215c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..34eafc0f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..19bf539f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..dcffef51 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..b4b6caad --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..e16eb222 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..2b1f1759 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is already locked;
+ * pmp_lock means cfg2/3 are locked;
+ * sec_mml is the coverage hole mentioned above.
+ */
+ if ((1 || 0 || 0)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // entered in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // unreachable: checkTestResult() calls exit()
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c
new file mode 100644
index 00000000..15e8ad02
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR accesses once mseccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection against non-M-mode accesses is assumed and not covered.
+ * - Accesses to invalid CSR indices (e.g. pmpcfg1 on RV64) are not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
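+ *
+ * Reference behaviour (per the Smepmp proposal, stated informally):
+ * mseccfg.MML and mseccfg.MMWP are sticky and cannot be cleared once
+ * set, while an attempt to set mseccfg.RLB is ignored when any PMP
+ * entry is already locked and RLB was not set beforehand. The
+ * expected_val fixups below encode exactly this.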
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr9 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (9 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr9, %1 \n"
+ "\tcsrr %0, pmpaddr9 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr9 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Refresh cfg0 so that entries other than idx 3 are left unchanged
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it applies to RLB and is independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next PC fetch becomes
+ * illegal. This leaves a small coverage hole for non-PMP_L + MML,
+ * which is a restricted use case and acceptable anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is already locked;
+ * pmp_lock means cfg2/3 are locked;
+ * sec_mml is the coverage hole mentioned above.
+ */
+ if ((1 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // entered in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // unreachable: checkTestResult() calls exit()
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c
new file mode 100644
index 00000000..51eea2e8
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR accesses once mseccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection against non-M-mode accesses is assumed and not covered.
+ * - Accesses to invalid CSR indices (e.g. pmpcfg1 on RV64) are not covered.
+ * - Executed on RV64 only.
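+ * - The file name appears to encode the generator's parameter matrix:
+ *   lock01/rlb0/mmwp1/mml1 are the lock and mseccfg bits programmed
+ *   before the test target, and sec_NN selects which mseccfg write
+ *   pattern the target exercises.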
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is already locked;
+ * pmp_lock means cfg2/3 are locked;
+ * sec_mml is the coverage hole mentioned above.
+ */
+ if ((1 || 0 || 0)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // entered in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // unreachable: checkTestResult() calls exit()
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c
new file mode 100644
index 00000000..8ae18f63
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR accesses once mseccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection against non-M-mode accesses is assumed and not covered.
+ * - Accesses to invalid CSR indices (e.g. pmpcfg1 on RV64) are not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
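+ *
+ * Note: each generated variant compiles exactly one branch of the
+ * "#if" below. "#if 0" skips the pmpaddr/pmpcfg write/read-back block
+ * and runs the mseccfg check in the "#else" branch; the pmp_* variants
+ * use "#if 1" to exercise the PMP CSR path instead.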
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr9 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (9 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr9, %1 \n"
+ "\tcsrr %0, pmpaddr9 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr9 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Refresh cfg0 so that entries other than idx 3 are left unchanged
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it applies to RLB and is independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next PC fetch becomes
+ * illegal. This leaves a small coverage hole for non-PMP_L + MML,
+ * which is a restricted use case and acceptable anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is already locked;
+ * pmp_lock means cfg2/3 are locked;
+ * sec_mml is the coverage hole mentioned above.
+ */
+ if ((1 || 0 || 1)
+ && 0 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // entered in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // unreachable: checkTestResult() calls exit()
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c
new file mode 100644
index 00000000..33384ed4
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR accesses once mseccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection against non-M-mode accesses is assumed and not covered.
+ * - Accesses to invalid CSR indices (e.g. pmpcfg1 on RV64) are not covered.
+ * - Executed on RV64 only.
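+ * - Result reporting: checkTestResult() exits with a bitmask of
+ *   expected-vs-actual mismatches (bit 0 = seccfg, bit 1 = pmpaddr,
+ *   bit 2 = pmpcfg), so exit code 0 means the observed CSR behaviour
+ *   matched this variant's expectations.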
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..9eb33c66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
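+ *
+ * The address check below derives a legal new value from the current
+ * one: for entry 0 (NAPOT) it widens the mask via
+ * wval = ((rval + 1) << 1) - 1, and for TOR entries it simply shifts
+ * the bound, wval = (rval << 1) + offset.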
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..99c397ad --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
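+ * - CSR number 0x747, written numerically throughout, is mseccfg (per
+ *   the Smepmp proposal); the numeric form presumably predates
+ *   assembler support for the symbolic name.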
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..3dd63a4c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
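+ *
+ * The pmpcfg check below toggles a single byte of the packed register,
+ * wval = cfg0 ^ ((reg_t)(bits | PMP_L) << (idx * 8)), so the entries
+ * at all other indices are written back unchanged.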
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..7973db03 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
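+ * - Any trap taken while the test runs is unexpected; handle_trap()
+ *   calls tohost_exit(1337) so a spurious trap is distinguishable from
+ *   the mismatch bitmask returned by checkTestResult().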
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..eeed0e91 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..5b22d970 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..f7aef7be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..f123cb56 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..be46b6a9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..c7bfa921 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..11d9108d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
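+ *
+ * For reference (descriptive note, not generated logic): pmpaddrN holds
+ * bits [XLEN+1:2] of an address, which is why region boundaries above are
+ * written as (addr >> 2). The read-back probe below picks a write value
+ * that stays legal in either address mode:
+ *   NAPOT: ((rval + 1) << 1) - 1   // extends the trailing-ones mask by one bit
+ *   TOR:   (rval << 1) + 65536     // just another in-range top-of-range value
+ * so a read-back mismatch indicates a blocked CSR write rather than an
+ * encoding artifact.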
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..d01fd8d3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
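+ *
+ * The file name suffix appears to encode the generator parameters (an
+ * inference from the constants baked in below, not documented upstream):
+ * rlb1 -> MSECCFG_RLB is set before the test, mmwp0/mml0 -> MMWP and MML
+ * stay clear, lock01 -> the lock-bit pattern applied to pmpcfg0, and
+ * pmp_16 -> the index of the pmp CSR under test.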
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
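+ *
+ * PMP_SHIFT is 2 because the pmpaddr registers drop the low two address
+ * bits. For an aligned power-of-two region, the NAPOT encoding used for
+ * pmpaddr0 in the M_MODE_RWX branch is:
+ *   pmpaddr = (base >> 2) | ((size >> 3) - 1);
+ * With base 0 and size TEST_MEM_START this reduces to
+ * (TEST_MEM_START >> 3) - 1, matching the value written there.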
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..516ece99 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
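+ *
+ * Layout note (descriptive only): each PMP entry owns one byte of a pmpcfg
+ * register, so on RV64 pmpcfg0 covers entries 0-7 and pmpcfg2 covers
+ * entries 8-15 (odd-numbered pmpcfg CSRs exist only on RV32). That is why
+ * the RV32-only cfg1 value is folded in with cfg0 |= (cfg1 << 32) above,
+ * and why the probe below shifts by (idx * 8) to reach one entry's byte:
+ *   wval = cfg0 ^ ((reg_t)bits << (3 * 8));  // entry 11 = byte 3 of pmpcfg2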
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..073963f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
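+ * - CSR 0x747 is mseccfg from the Smepmp extension: MML (bit 0) is
+ *   Machine-Mode Lockdown, MMWP (bit 1) is Machine-Mode Whitelist Policy,
+ *   and RLB (bit 2) is Rule-Locking Bypass, which permits edits to locked
+ *   PMP entries while it is set. MML and MMWP are sticky once written.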
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
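+ *
+ * The lock handling further down targets bytes 2 and 3 of pmpcfg0, i.e.
+ * entries 2 and 3:
+ *   reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24);
+ * followed by csrs/csrc to set or clear PMP_L without disturbing the other
+ * config fields. With RLB raised at the start this is permitted even for
+ * entries that were already locked.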
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..51e9862f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
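+ *
+ * For the mseccfg branch below: wval is written to CSR 0x747 and read
+ * back, and expected_val keeps only the three writable bits. The follow-up
+ * adjustments model the Smepmp rules: RLB cannot be raised while entries
+ * are locked and RLB was previously clear, and MML/MMWP remain set once
+ * written, so a read-back mismatch flags a real access-control failure.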
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..8128eeb5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
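+ * - checkTestResult() encodes failures as an exit-code bitmask: bit 0 for
+ *   a seccfg mismatch, bit 1 for pmpaddr, bit 2 for pmpcfg; exit code 0
+ *   means every observed result matched its expected_* constant.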
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..183024a3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
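+ *
+ * In the sec_* outputs the generator emits "#if 0" at this point, so the
+ * pmpaddr/pmpcfg read-back branch is compiled out and the #else branch
+ * exercises the mseccfg write path instead; the pmp_* outputs are the
+ * mirror image, generated with "#if 1".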
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..bfb7f15c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
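+ *
+ * TOR reminder (descriptive only): an entry with A=TOR matches addresses
+ * in [pmpaddr[i-1] << 2, pmpaddr[i] << 2), with entry 0 bottoming out at
+ * address 0. In the !M_MODE_RWX branch below, entry 4 is OFF and only
+ * supplies the 0x80000000 base, while entries 5 and 6 carry the TOR code
+ * and data permissions built into cfg1.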
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg access since the pmplock_recorded status
+ * may be updated again when accessing pmpcfg.
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr13 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (13 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr13, %1 \n"
+ "\tcsrr %0, pmpaddr13 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr13 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing an idx other than 3
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+ * This is a little coverage hole for non-PMP_L + mml, which should be
+ * a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..b1583015 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
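+ * Note that 0x747 is the CSR number of mseccfg as allocated by the
+ * Smepmp (PMP enhancements) proposal; it is written by number here,
+ * likely because assemblers may not know it by name yet.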
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..80478615 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..2d365a77 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
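+ * Per the Smepmp proposal, MSECCFG_RLB (Rule Locking Bypass) keeps
+ * locked PMP entries writable for as long as the bit stays set.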
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..fbc2988b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..c0d1b08b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
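+ * Under MSECCFG_MML (Machine Mode Lockdown) the L bit no longer only
+ * locks an entry; it also selects whether a rule applies to M mode or
+ * to lower privilege modes.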
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..ed94ce66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..a9ba9dd8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
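+ * Each check below writes a CSR and reads it back; a mismatch means the
+ * write was ignored in whole or in part, e.g. because the entry is locked.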
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..d6b7079e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
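+ * When M_MODE_RWX is 0, entries 4..6 below cover M mode instead, as two TOR
+ * regions: entry 5 maps 0x80000000..0x80010000 execute-only (test code) and
+ * entry 6 maps 0x80010000..TEST_MEM_START read/write (test data); entry 4
+ * stays OFF and only supplies the TOR base address.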
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..39120ed2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
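+ * With MSECCFG_RLB (rule-locking bypass) left set, writes to a PMP entry are
+ * expected to succeed even while its L bit is set, so every CSR write below
+ * should read back exactly as written; any mismatch is recorded as a failure.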
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..f79df0b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
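+ * - The exit code is a bitmask of mismatches against the expected_* flags:
+ *     int seccfg = ret & 1, pmpaddr = (ret >> 1) & 1, pmpcfg = (ret >> 2) & 1;
+ *   so exit code 0 means every check passed.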
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..34d897ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
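+ * (in this generated variant nothing is skipped: handle_trap below simply
+ * reports failure to the host via tohost_exit(1337))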
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
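+ * This variant exercises the upper PMP bank: on RV64 pmpcfg2 packs the cfg
+ * bytes for entries 8..15, so the byte 3 toggled below belongs to entry 11
+ * (hence the _pmp_11 file name), while the address probe uses pmpaddr15.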
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..061aefe9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
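+ * - This variant probes pmpaddr7 for the address check but flips a cfg byte
+ *   in pmpcfg2 (the second RV64 bank), so the two checks land on different
+ *   PMP entries.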
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..8f22b9a0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
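+ * The XOR pattern below flips PMP_L plus the low cfg bits of one byte of
+ * pmpcfg2 only, leaving the other seven entries of that bank untouched; the
+ * csrw/csrr pair then verifies the flip is observable despite the lock bit.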
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..37409fcf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
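+ * - For the pmpaddr check, the written value is a shifted form of the old
+ *   one, so it stays legal for both NAPOT and TOR encodings while still
+ *   differing from the reset value; a matching readback proves the write
+ *   took effect.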
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked
+ * pmp_lock means cfg2/3 are locked
+ * sec_mml is the test coverage hole mentioned above
+ */
+ if ((1 || 0 || 1)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // entered in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // not reached: checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c
new file mode 100644
index 00000000..b54a5f95
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Override of the handle_trap hook from syscalls.c:
+ * any trap here is unexpected, so simply exit via tohost_exit.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * Set MSECCFG_RLB (via the mseccfg CSR, 0x747) so nothing is locked at start.
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // the L bit must be set for M-mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * PMP and seccfg accesses need to be separated, since the pmplock_recorded
+ * status may be updated again when accessing pmpcfg.
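+ *
+ * Added note (a reading of the Smepmp rules, not generated text): while
+ * mseccfg.RLB is 1, even PMP_L-locked entries remain writable; once RLB
+ * is cleared while some entry holds PMP_L, attempts to set RLB again are
+ * ignored. The expected-value checks below rely on exactly this behavior.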
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..3a94ebaf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..09e19aaa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
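+ *
+ * Layout reminder (added note, not generated text): each PMP entry owns one
+ * byte of a pmpcfg CSR, and RV64 implements only the even-numbered pmpcfg
+ * CSRs, so pmpcfg2 packs entries 8..15 and the "<< (3 * 8)" below selects
+ * entry 11's byte. A hypothetical accessor:
+ *
+ *   unsigned char cfg_byte(reg_t pmpcfg_val, int idx_in_csr) {
+ *     return (pmpcfg_val >> (idx_in_csr * 8)) & 0xff;
+ *   }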
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..5ec0d311 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
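+ * - Added remark (an observation on the generated parameters, not original
+ *   text): in this sec_* variant the pmpaddr/pmpcfg read-back branch is
+ *   compiled out via "#if 0", so only the mseccfg write/read-back path runs.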
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..215f7d50 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
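+ * (added note: the handler below does not actually skip; any trap in this
+ * test is treated as fatal and reported via tohost_exit(1337))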
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..15e6a992 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..4abf2514 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
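+ *
+ * Added note (a reading of Smepmp, not generated text): mseccfg.MML and
+ * mseccfg.MMWP are sticky once set. set_cfg() already set MML above, so a
+ * write that tries to clear it is ignored; the expected-value logic below
+ * ORs the already-set bits back in for exactly that reason.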
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..0f0ae1ac --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
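+ * A pmpaddr CSR holds a physical address right-shifted by 2 (PMP_SHIFT),
+ * which is why every region bound below is written with >> 2.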
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..2e14dacd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
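+ * In this variant the pmpaddr/pmpcfg probe is compiled out (#if 0); only the
+ * mseccfg write and read-back below are exercised.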
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..fd257a1c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
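+ * - mseccfg bits: MML locks down M-mode rules, MMWP makes M-mode accesses
+ *   that match no PMP entry fail, and RLB allows locked rules to be edited.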
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
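+ * With TOR, an entry covers the range from the previous pmpaddr up to its own
+ * pmpaddr, so pmpaddr1 supplies the base of the TEST_MEM entry at pmpaddr2.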
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..bb901149 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
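+ * While MSECCFG_RLB is set, even entries with PMP_L set may be rewritten;
+ * once RLB is cleared, writes to locked entries are silently dropped.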
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..c2d3ef84 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
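+ * - Expected outcomes are baked in as the expected_*_fail constants below and
+ *   compared against what the accesses actually did.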
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
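+ * Only the top-of-range entries carry R/W/X permissions; the TOR base
+ * entries themselves stay PMP_OFF.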
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..8007ef97 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
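+ * The pmpaddr probe below writes back the doubled value (rval << 1), still a
+ * legal TOR bound, then checks that the write actually stuck.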
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..b25abf8a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
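+ * - checkTestResult() exits with a bitmask: bit 0 for seccfg, bit 1 for
+ *   pmpaddr, bit 2 for pmpcfg mismatches; 0 means the test passed.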
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
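+ * With M_MODE_RWX a single NAPOT entry covering [0, TEST_MEM_START) would
+ * stand in for the three TOR entries programmed here.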
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..6648b0bc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
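+ * (For context, not part of the generated skeleton: mseccfg is CSR 0x747; its
+ * MML, MMWP and RLB fields sit in bits 0..2, matching the MSECCFG_* macros
+ * above. While RLB is set, even locked pmpcfg entries remain writable, which
+ * the lock_bits csrc sequence above relies on.)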
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..aa96209d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
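+ * (Spec note, added for readability: pmpaddr CSRs hold bits [XLEN+1:2] of the
+ * physical address, hence the ">> 2" in the writes below; a TOR entry i then
+ * matches addresses with pmpaddr[i-1] <= addr/4 < pmpaddr[i].)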
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..a520e1a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
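+ * (Each PMP entry owns one byte of a pmpcfg register, so the test below flips
+ * entry 2 of pmpcfg0 with an 8-bit pattern shifted by 2 * 8.)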
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..908a03d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
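+ * - On mismatch, checkTestResult() exits with a bitmask: 1 for seccfg,
+ *   2 for pmpaddr, 4 for pmpcfg failures.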
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
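+ * (With !M_MODE_RWX, cfg1 below maps entry 5 as X|TOR for the code range and
+ * entry 6 as R|W|TOR for the data range bounded via pmpaddr4..6; this reading
+ * of the byte layout is an editorial gloss, not generated text.)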
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..89c733fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
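+ * (RV64 implements only even-numbered pmpcfg registers; pmpcfg2 below covers
+ * entries 8..15, so pmpaddr10's configuration byte sits at offset 2 * 8.)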
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..956acb51 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..169259a1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
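+ * (PMP CSRs are WARL: a write to a pmpaddr or pmpcfg field that is locked is
+ * silently ignored, so the write/read-back compares below detect whether the
+ * lock actually took effect.)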
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..73283230 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..bcf56469 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
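+ * Note: MSECCFG_RLB set earlier acts as a rule-locking bypass, so even locked entries would accept these writes.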
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..d00889f2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
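+ * On RV64 pmpcfg0 packs eight 8-bit entries and pmpcfg1 does not exist, which is why cfg1 is merged into cfg0 at bit 32 below.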
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..dee6b818 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
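+ * The checks below use write-then-read-back: a locked or WARL-adjusted field surfaces as wval != rval, with no trap required.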
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..ea1a53ab --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
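+ * With TOR, an entry matches addresses from the previous pmpaddr (inclusive) up to its own pmpaddr (exclusive).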
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..eec8a0d5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
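+ * In this variant the "#if 0" pmp branch below is compiled out, so only the mseccfg write/read-back path is exercised.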
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..8599ddaf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
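+ * PMP_L locks an entry (and applies the rule to M mode) until reset, unless mseccfg.RLB is set.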
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..d0d86997 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
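+ * mseccfg.MML and mseccfg.MMWP are sticky (set-only) bits, which the expected_val fixups below account for.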
*/ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent of pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0, otherwise the next PC will be illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..9e5006b6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program verifies the pmp CSR access when seccfg is introduced. + * It's expected to be executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not covered. + * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered. + * - Executed on RV64 only.
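+ * - Note (annotation, not generated text): the raw CSR number 0x747 used
+ *   throughout these tests is mseccfg; it is written numerically,
+ *   presumably because assembler support for the mseccfg name postdates
+ *   the generation of these files.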
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..3e937e00 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..f2ed468b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..2cd28450 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..3b542b52 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
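+ * - Note (annotation): although the remark above says RV64 only, the
+ *   skeleton still carries __riscv_xlen == 64 guards, so the rv32 pmpcfg1
+ *   fallback paths below are effectively dead code in these outputs.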
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..c2033817 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
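+ *
+ * In the *_mml1_pmp_* variants the first branch below is the one compiled
+ * in (#if 1): it rewrites a single pmpaddr/pmpcfg pair, toggling bits that
+ * include PMP_L, while mseccfg.RLB stays 1. The read-back is therefore
+ * expected to match exactly, since RLB bypasses entry locking (annotation,
+ * not generated text).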
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..4045ba63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c
new file mode 100644
index 00000000..f3dbdbd7
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0; otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Overrides the handler from syscalls.c.
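+ * All expected_*_fail values above are zero for this variant, so the test
+ * should run to completion without trapping at all.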
+ * A trap therefore means failure: the handler ends the test via tohost_exit().
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * Set MSECCFG_RLB so that nothing is locked at the start.
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg as the base of a TOR range.
+     * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (1 != 0) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+        if (0) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * PMP and seccfg accesses need to be separated, since the pmplock_recorded
+     * status may be updated again when accessing pmpcfg.
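+     * Also note that mseccfg.RLB (rule-locking bypass) is kept set here, so
+     * even entries with PMP_L set are expected to accept the writes below.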
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr2 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (2 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr2, %1 \n"
+                  "\tcsrr %0, pmpaddr2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Re-read cfg0 so that the write below changes no idx other than 2
+    asm volatile ("csrr %0, pmpcfg0 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+                  "\tcsrr %0, pmpcfg0 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * PMP_L needs to be set for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c
new file mode 100644
index 00000000..9cca05e3
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
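+ * - On failure, the exit code is a bitmask: +1 for a seccfg, +2 for a
+ *   pmpaddr and +4 for a pmpcfg read-back mismatch (see checkTestResult()).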
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
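+ * As a worked example of the encoding, TEST_MEM_START (0x200000) is
+ * written to pmpaddr1 below as 0x200000 >> 2 = 0x80000.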
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c
new file mode 100644
index 00000000..6249270e
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0; otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
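+ * (tohost_exit() is declared noreturn above; it presumably reports the exit
+ * code to the host through the tohost mechanism its name suggests.)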
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
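+ * The pmpcfg check below XORs a nonzero constant into the byte belonging to
+ * the entry under test, so the written value is guaranteed to differ from
+ * the current one and, with RLB set, must read back exactly.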
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr2 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (2 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 0;
+    }
+    asm volatile ("csrw pmpaddr2, %1 \n"
+                  "\tcsrr %0, pmpaddr2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr2 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Re-read cfg0 so that the write below changes no idx other than 2
+    asm volatile ("csrr %0, pmpcfg0 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8));
+    asm volatile ("csrw pmpcfg0, %1 \n"
+                  "\tcsrr %0, pmpcfg0 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * PMP_L needs to be set for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c
new file mode 100644
index 00000000..244f9a5b
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
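+ * - mseccfg is accessed by its raw CSR number (0x747) throughout, which
+ *   also works with assemblers that predate the register name.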
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
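+ * With M_MODE_RWX == 0 (the case generated here), pmpaddr4..6 below add
+ * separate TOR code and data windows starting at 0x80000000 for M mode.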
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c
new file mode 100644
index 00000000..8d84d074
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0; otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
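+ * (The CAUSE_* macros above come with the generated skeleton but are unused
+ * in this CSR-only variant; no access fault is provoked here.)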
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
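+ * Note that rv64 implements only the even-numbered pmpcfg CSRs: pmpcfg0
+ * packs the 8-bit configs of entries 0..7 and pmpcfg2 those of entries
+ * 8..15, so the pmpaddr and pmpcfg accesses below touch different entries.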
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr14 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (14 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr14, %1 \n"
+                  "\tcsrr %0, pmpaddr14 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr14 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Re-read cfg0 so that the write below changes no idx other than 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * PMP_L needs to be set for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c
new file mode 100644
index 00000000..ef831251
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
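+ * For a NAPOT entry the low pmpaddr bits encode the region size: a
+ * 2^k-byte region based at B is written as (B >> 2) | ((1 << (k - 3)) - 1),
+ * so the test target derives a value via ((rval + 1) << 1) - 1, i.e. one
+ * more mask bit.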
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c
new file mode 100644
index 00000000..371d4203
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0; otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
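+ * (printf() compiles to nothing unless PRINTF_SUPPORTED is defined, so
+ * failures still surface through the exit code set by checkTestResult().)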
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
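+ * The pmp fields are WARL, but in this configuration every bit written by
+ * the test target is expected to be retained, hence the plain wval != rval
+ * comparisons.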
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr7 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (7 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr7, %1 \n"
+                  "\tcsrr %0, pmpaddr7 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr7 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Re-read cfg0 so that the write below changes no idx other than 3
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * PMP_L needs to be set for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 0 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c
new file mode 100644
index 00000000..0c8a3d54
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M-mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
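+     * pmpaddr CSRs hold physical address bits [XLEN+1:2] per the RISC-V
+     * privileged spec, hence the >> 2 (PMP_SHIFT) in the writes below;
+     * e.g. TEST_MEM_END 0x240000 is written as 0x240000 >> 2 = 0x90000.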
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..9a6dfe4f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
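+     * The write value below is derived from the current pmpaddr9 contents
+     * ((rval << 1) + 65536 for this non-NAPOT index), so the read-back
+     * compare can tell whether the write actually took effect.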
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..9c3333c7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
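+     * With A=TOR an entry i matches addresses in
+     * [pmpaddr[i-1] << 2, pmpaddr[i] << 2), so pmpaddr1..pmpaddr3 below
+     * chain the TEST_MEM and U_MEM ranges back-to-back.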
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..d694a8b1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
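+     * PMP_L normally locks an entry (and, for TOR, writes to the preceding
+     * pmpaddr) until reset; per Smepmp, mseccfg.RLB bypasses that lock,
+     * which is why set_cfg() sets RLB before programming any PMP_L bits.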
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..40a06541 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..6ad709b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
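+     * In this sec_* variant the pmpaddr/pmpcfg exercise below is compiled
+     * out (#if 0) and the test target is the mseccfg write/read-back itself.
+     * Per Smepmp: MML = machine-mode lockdown, MMWP = machine-mode whitelist
+     * policy, RLB = rule-locking bypass; 0x747 is mseccfg's CSR address,
+     * used numerically in the asm.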
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..5eea0b30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
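+     * With the macros above, TEST_MEM spans [0x200000, 0x240000) and U_MEM
+     * spans [0x240000, U_MEM_END = 0x240000 + 0x10000 = 0x250000), well
+     * below the 0x80000000 region used for M-mode code and data.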
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((1 || 0 || 0)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c
new file mode 100644
index 00000000..995a931f
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the pmp CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - The access on an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception
+ * is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently it simply exits via tohost_exit (no skip to the next instruction)
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (1 != 0) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (0) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg access since pmplock_recorded status may be
+ * updated again when accessing pmpcfg.
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..88126494 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
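+ * - The file name fields (lock01, rlb1, mmwp1, mml1, sec_04) appear to be
+ *   the gengen parameters for this variant; sibling outputs differ only in
+ *   the constants folded into the conditionals below.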
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
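+ *
+ * For reference (derived from the writes below): pmpaddr1..3 bound TEST_MEM
+ * and U_MEM as TOR regions, while pmpaddr4..6 carve an execute-only code
+ * window and a read/write data window for M mode; the matching cfg bytes
+ * are assembled into cfg0 — on rv32 the upper half goes out via pmpcfg1.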
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..aabb68d5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
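+ * (this generated instance just calls tohost_exit(1337) on any trap; a
+ * handler that really skipped would return epc + 4 — a hypothetical sketch,
+ * correct only for non-compressed instructions)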
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
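+ * (in Smepmp terms: once a pmpcfg write leaves L bits set while mseccfg.RLB
+ * is clear, later attempts to set RLB are ignored, so interleaving the two
+ * kinds of access would change what this probe observes)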
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..3f547768 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
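+ * - The raw CSR number 0x747 used below is mseccfg (0x757 would be
+ *   mseccfgh on rv32); presumably it is written numerically because the
+ *   assembler lacked the symbolic name at the time.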
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
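+ *
+ * (A TOR entry i matches pmpaddr[i-1] <= addr < pmpaddr[i], and the pmpaddr
+ * CSRs hold the physical address shifted right by 2 — hence the ">> 2" in
+ * every write below.)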
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..236d24bf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
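+ * (any trap here is a test failure: the expected_*_fail flags above model
+ * silently ignored WARL writes, never faults, so the handler just exits)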
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
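+ * (the csrw-then-csrr pair below is the usual WARL probe — write wval, read
+ * back rval, compare against the legalized expectation; bits that a locked
+ * configuration refuses simply read back unchanged)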
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..5a3ca605 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
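+ * - On mismatch checkTestResult() exits with a bitmask: +1 for seccfg,
+ *   +2 for pmpaddr, +4 for pmpcfg; exit code 0 means every observation
+ *   matched its expected_* value.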
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
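+ *
+ * (In this lock10/rlb0 variant the L bits of entries 2/3 are set below and
+ * RLB is left clear, so the test target's pmpaddr3/pmpcfg0 writes should be
+ * ignored — hence expected_pmpaddr_fail/expected_pmpcfg_fail are 1 above.)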
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..e777536c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
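+ * (the hook signature — cause, epc, saved regs — follows the riscv-tests
+ * style syscalls.c; the returned value is taken as the epc to resume at)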
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
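+ * (note the lock bits were applied above with csrs/csrc, the atomic
+ * set/clear forms, so only the L bits of entries 2 and 3 were touched)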
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..07ae0457 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
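+ * - (The __riscv_xlen == 64 guards below come from the shared skeleton; the
+ *   rv32 branches are retained but unused here.)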
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
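+ *
+ * (NAPOT, by contrast, encodes the region size in trailing one bits of
+ * pmpaddr — roughly base/4 | (size/8 - 1) — which is what the unused
+ * "idx == 0" branch in the test target computes.)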
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c
new file mode 100644
index 00000000..38c4c94a
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access once seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 1;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
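+ * Note that M-mode writes to locked PMP CSRs are ignored (WARL) rather than trapped, so the
+ * test observes failures via read-back; this handler only catches unrelated faults.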
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
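+ * (Each generated variant therefore exercises either the pmp CSRs (#if branch) or mseccfg
+ *  (#else branch), never both in a single run.)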
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..1ce57af0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
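+ * (On RV64 pmpcfg0 packs the eight 8-bit configs for entries 0-7, which is why cfg1 is
+ *  folded into the upper 32 bits of the single pmpcfg0 write below.)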
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c
new file mode 100644
index 00000000..c0bf34dd
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access once seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 1;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
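+ * checkTestResult() folds oracle mismatches into exit-code bits (+1 seccfg, +2 pmpaddr,
+ * +4 pmpcfg); exit(0) means every expected_* value matched.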
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
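+ * (Entries 2/3 were locked above while RLB is clear, so the flipped bits below land in
+ *  locked registers and should be silently dropped; the read-back mismatch is exactly what
+ *  expected_pmp*_fail = 1 predicts.)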
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..d18a5007 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
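+ * - Note: rv64 implements only even-numbered pmpcfg CSRs (pmpcfg0, pmpcfg2, ...), each
+ *   covering eight entries, so an odd index such as pmpcfg1 is indeed invalid there.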
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
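+ * (pmpaddr CSRs hold the target physical address shifted right by 2 bits, hence the >> 2 in
+ *  every write below.)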
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c
new file mode 100644
index 00000000..6f83ce17
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access once seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
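+ * (The rewrite value is mode-aware: ((rval + 1) << 1) - 1 equals (rval << 1) | 1, which
+ *  appends a trailing 1 to a NAPOT mask and doubles the region; the else branch rescales a
+ *  TOR boundary instead.)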
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..cd9556af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
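+ * - The expected_* constants below form this variant's oracle: the pmpaddr10 write should
+ *   take effect while the pmpcfg2 toggle should be dropped.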
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
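+ * (Entries 8-15 are configured through pmpcfg2 on RV64, so the test target below reads and
+ *  writes pmpcfg2 rather than pmpcfg0.)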
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+        "\tcsrr %0, 0x747 \n"
+        : "=r"(rval)
+        : "r"(wval)
+        : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c
new file mode 100644
index 00000000..b334f85a
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access once seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
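+ * When PRINTF_SUPPORTED is not defined, printf() above compiles away entirely, so the exit
+ * code from checkTestResult() is the only failure signal.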
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
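+ * (The TOR boundary below is rewritten as (rval << 1) + 65536; since pmpaddr encodes
+ *  address >> 2, that moves the physical boundary by a further 0x40000 bytes, presumably
+ *  just to yield a distinct legal value.)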
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..2bf9c05d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
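+ *
+ * (Per the privileged spec, pmpaddrN holds physical address bits [55:2]
+ * on RV64, hence the ">> 2" shifts below; e.g. TEST_MEM_START = 0x200000
+ * is written as 0x80000.)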
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..9ce76ed6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
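+ *
+ * The pmpcfg write below relies on the RV64 packing of pmpcfg2, which
+ * holds pmp8cfg..pmp15cfg one byte each: XOR-ing (5 << (1 * 8)) flips
+ * PMP_R|PMP_X of pmp9cfg only, so a matching readback means the unlocked
+ * entry accepted the update byte-for-byte.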
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..5c432472 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
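+ *
+ * (This variant declares expected_pmpcfg_fail = 1 above: the test target
+ * later XORs 6 into byte 0 of pmpcfg2, i.e. it requests W=1 with R=0 on
+ * pmp8cfg, an encoding the privileged spec reserves while MML is clear,
+ * so the readback is expected to differ from the written value.)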
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..b4bbae89 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
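+ *
+ * checkTestResult() at the end folds the three comparisons into a
+ * bit-encoded exit status (+1 seccfg, +2 pmpaddr, +4 pmpcfg mismatch),
+ * so exit(0) means every observation matched its expected_* constant.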
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..e847bdbb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
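+ *
+ * (In the !M_MODE_RWX branch below, pmp4cfg stays PMP_OFF and merely
+ * anchors a TOR base at 0x80000000; pmp5cfg (X|TOR) and pmp6cfg
+ * (R|W|TOR) then cover the code and data ranges up to their own
+ * pmpaddr, one cfg byte per entry inside cfg1.)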
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..946d571a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
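+ *
+ * For the seccfg path below (values are illustrative): mseccfg is CSR
+ * 0x747 from Smepmp, written numerically in case the toolchain does not
+ * know the name. Here wval = MSECCFG_MML = 0x1; MML and MMWP are sticky
+ * once set, and with locked PMP entries present while RLB is clear an
+ * attempt to set MSECCFG_RLB would be ignored, which is exactly what
+ * the expected_val computation models.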
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..7142085b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..8fd69745 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
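+ * (0x747 in the csrw/csrs lines is the CSR number of mseccfg; it is written
+ * numerically, presumably so the file assembles with toolchains that do not
+ * yet know the Smepmp CSR names.)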
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..53632366 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
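+ * In the M_MODE_RWX variant a single NAPOT entry covers [0, TEST_MEM_START):
+ * for a power-of-two-sized region based at 0 the encoding is (size >> 3) - 1,
+ * which matches the pmpaddr0 write below.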
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..73dd24b0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
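+ * Note that Smepmp makes these bits sticky: MML and MMWP can be set but not
+ * cleared, and RLB cannot be raised while locked entries exist with RLB
+ * clear, which is what the expected_val adjustments below encode.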
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..ba742091 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
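+ * - The sec_NN cases sweep write combinations of RLB/MML/MMWP and check
+ *   which bits stick once locked entries have been installed.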
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
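+ * Each pmpNcfg field is one byte of a packed pmpcfg CSR; on RV64 pmpcfg0
+ * holds entries 0-7 and odd-numbered pmpcfg registers do not exist, which
+ * is why cfg1 is merged in as "cfg0 |= cfg1 << 32" below.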
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..91f3d84c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
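+ * (Only one of the two "#if" arms below is compiled into each generated
+ * variant: one arm does a write/read-back check on a pmpaddr/pmpcfg pair,
+ * the other does the same for mseccfg.)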
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..54a2875f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
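+ * - The file name encodes the generator knobs (lock / rlb / mmwp / mml plus
+ *   a case index); the variants differ only in folded-in constants.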
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
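+ * This variant locks the PMP entries in use and then sets MML with RLB
+ * clear, so the write probes further down are expected to be silently
+ * ignored (expected_pmpaddr_fail/expected_pmpcfg_fail are preset to 1).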
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..42f3b403 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
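+ * (checkTestResult() folds the three expected/actual comparisons into one
+ * bitmask exit code: +1 for seccfg, +2 for pmpaddr, +4 for pmpcfg, so a
+ * nonzero exit status pinpoints which probe diverged.)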
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..3d5dd3af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
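+ * - As in the other mml1_pmp cases, the locked entries are expected to
+ *   reject the CSR writes (both expected_*_fail flags preset to 1).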
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
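+ *
+ * Editorial note: pmpaddrN holds a physical address shifted right by
+ * PMP_SHIFT (2), i.e. in 4-byte units, and a TOR entry N covers
+ * [pmpaddr(N-1) << 2, pmpaddrN << 2). So TEST_MEM_END (0x240000) is
+ * programmed below as 0x240000 >> 2 == 0x90000, and pmpaddr1/pmpaddr2
+ * bound TEST_MEM while pmpaddr3 extends coverage up to U_MEM_END.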
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..95b33bd8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
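+ *
+ * Rough sketch (editorial): the pmpcfg probe below flips a few low bits
+ * in the configuration byte of the entry under test and reads back:
+ *
+ *   wval = cfg0 ^ ((reg_t)flip << (idx * 8));  // 'flip' and 'idx' stand
+ *                                              // for this variant's constants
+ *
+ * On RV64, pmpcfg0 packs entries 0..7 at one byte per entry, so
+ * (idx * 8) selects the entry's byte; a locked byte reads back unchanged.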
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..18fd50c5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
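+ * - Editorial note: this variant expects the PMP probes to fail
+ *   (expected_pmpaddr_fail/expected_pmpcfg_fail are 1): the entry under
+ *   test lives in pmpcfg0 with PMP_L set while mseccfg.RLB is 0 at probe
+ *   time, so both CSR writes should be silently ignored.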
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
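+ *
+ * Editorial note: on RV64 only even-numbered pmpcfg CSRs exist and each
+ * packs 8 entry bytes, so the rv32 pair pmpcfg0/pmpcfg1 collapses into
+ * one 64-bit pmpcfg0. That is why the rv64 path below folds cfg1 in as
+ *
+ *   cfg0 |= (cfg1 << 32);  // entries 4..7 in the high half
+ *
+ * while the rv32 path writes pmpcfg1 separately.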
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..b86e0556 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
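+ *
+ * Editorial note: PMP_L normally locks an entry (and, for a TOR entry,
+ * the preceding pmpaddr register as well) until reset. mseccfg.RLB
+ * ("Rule Locking Bypass") makes locked rules editable again, which is
+ * why set_cfg() set RLB first and the final csrw to 0x747 above cleared
+ * it again before the probes below run.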
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..6c09a772 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
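+ * - Editorial note: tokens of the form @name:type@ left in comments
+ *   (e.g. @set_rlb_at_start:int@) appear to be substitution parameters
+ *   from the gengen skeleton named above; the literal 0/1 constants next
+ *   to them are the values chosen for this variant.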
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..c6f429bc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
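+ *
+ * Editorial note: unlike the pmp_0x variants, this test probes entry 13
+ * through pmpcfg2 (entries 8..15 on RV64). set_cfg() above only
+ * configures and locks entries in pmpcfg0, so the write-back below is
+ * expected to stick and the expected_*_fail constants are all 0.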
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..46acb232 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
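+ * - Editorial note: the raw CSR number 0x747 used throughout is mseccfg,
+ *   the Smepmp/ePMP machine security configuration register; it is
+ *   accessed by number, presumably because assemblers of the time did
+ *   not yet recognise the name.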
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..21a0e727 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently it simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+            :
+            : "r"(cfg1)
+            : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+            :
+            : "r"(cfg0)
+            : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                    :
+                    : "r"(lock_bits)
+                    : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                    :
+                    : "r"(lock_bits)
+                    : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+            | (1 ? MSECCFG_MML : 0)
+            | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate the pmp and seccfg accesses, since the
+     * pmplock_recorded status may be updated again when accessing pmpcfg.
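+     *
+     * NOTE: every check below uses the same write/read-back pattern. An
+     * illustrative sketch of that pattern (CHECK_CSR_RW is hypothetical,
+     * not part of the generated source):
+     *
+     *   #define CHECK_CSR_RW(csr, wv, rv, fail)                   \
+     *       asm volatile ("csrw " #csr ", %1 \n\tcsrr %0, " #csr  \
+     *                     : "=r"(rv) : "r"(wv) : "memory");       \
+     *       if ((wv) != (rv)) (fail) = 1;
+     *
+     * A write silently dropped by a locked entry then shows up as a
+     * read-back mismatch rather than as a trap.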
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr15 \n"
+            : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (15 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr15, %1 \n"
+            "\tcsrr %0, pmpaddr15 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpaddr15 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+            : "=r"(cfg0)
+            :
+            : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+            "\tcsrr %0, pmpcfg2 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+            "\tcsrr %0, 0x747 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c
new file mode 100644
index 00000000..be9f7ab0
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
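+ *
+ * NOTE: the RV64-only restriction matters because RV64 implements only the
+ * even-numbered pmpcfg CSRs: pmpcfg0 packs entries 0-7 and pmpcfg2 packs
+ * entries 8-15, eight 8-bit fields each, so pmpcfg1 (a valid index on RV32)
+ * is an invalid index here.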
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
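+ *
+ * NOTE (illustrative, derived from the macros above): the TOR layout set up
+ * by the writes below is
+ *
+ *   pmpaddr1 = TEST_MEM_START >> 2   // base of TEST_MEM, 0x200000
+ *   pmpaddr2 = TEST_MEM_END   >> 2   // top of TEST_MEM,  0x240000
+ *   pmpaddr3 = U_MEM_END      >> 2   // top of U_MEM,     0x250000
+ *
+ * Each pmpaddr CSR holds a physical address shifted right by PMP_SHIFT (2),
+ * and a TOR entry i covers [pmpaddr(i-1) << 2, pmpaddr(i) << 2).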
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..0e71d547 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
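+ *
+ * NOTE on the value written below: doubling the old pmpaddr value and adding
+ * 65536 (0x10000) guarantees the written value differs from whatever was
+ * there before, while remaining a legal pmpaddr under both the NAPOT and
+ * TOR interpretations; e.g. rval = 0 gives wval = 0x10000, i.e. byte
+ * address 0x40000 after the << PMP_SHIFT scaling.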
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..966bc68b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
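+ *
+ * NOTE: the i-th 8-bit config field within a pmpcfg CSR occupies bits
+ * [8*i+7 : 8*i], so the test body below flips selected R/W/X bits of a
+ * single entry with a byte-wide XOR, cfg ^ ((reg_t)flags << (idx * 8)),
+ * leaving the other seven packed entries untouched.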
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
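+ *
+ * NOTE: in the !M_MODE_RWX branch below, the configs for entries 4-6 are
+ * staged in cfg1; on RV64 they are folded into the upper half of pmpcfg0
+ * via cfg0 |= (cfg1 << 32), since pmpcfg1 does not exist on RV64.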
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..b2b0ab55 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
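+ *
+ * NOTE on the (reg_t) cast applied to lock_bits above: PMP_L is 0x80, so
+ * the plain-int expression (PMP_L << 24) yields 0x80000000 with the sign
+ * bit set; widened to the 64-bit reg_t that would sign-extend to
+ * 0xffffffff80000000 and clobber the upper pmpcfg0 fields, which is what
+ * the "avoid use (default) int type" comment is guarding against.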
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..7cd73cd9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
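+ *
+ * NOTE: unlike the pmp_NN variants of this generated test, the sec_NN
+ * variants select the #else branch of the test target (#if 0 below), so
+ * they exercise writes to mseccfg itself rather than to pmpaddr/pmpcfg,
+ * then model which of RLB/MML/MMWP should survive the write.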
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..ec7b27b5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
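+ *
+ * NOTE: the expected_val bookkeeping below encodes the Smepmp WARL rules as
+ * this test suite understands them: MML and MMWP are sticky (they can be
+ * set but never cleared before reset), and RLB cannot be newly set while a
+ * PMP entry is locked; hence RLB is masked out and MML, already set during
+ * setup, is forced back in.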
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..adc194fb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
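+ *
+ * NOTE: mseccfg is referenced by its raw CSR number 0x747 throughout these
+ * tests, presumably because the assemblers current at generation time did
+ * not yet know the CSR by name; 0x747 is the mseccfg address assigned by
+ * the Smepmp extension.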
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (1) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+            :
+            : "r"(cfg1)
+            : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+            :
+            : "r"(cfg0)
+            : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                    :
+                    : "r"(lock_bits)
+                    : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                    :
+                    : "r"(lock_bits)
+                    : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+        | (1 ? MSECCFG_MML : 0)
+        | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr15 \n"
+            : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (15 == 0) {
+        wval = ((rval + 1) << 1) - 1;    // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr15, %1 \n"
+            "\tcsrr %0, pmpaddr15 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpaddr15 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 0
+    asm volatile ("csrr %0, pmpcfg2 \n"
+            : "=r"(cfg0)
+            :
+            : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+            "\tcsrr %0, pmpcfg2 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next instruction fetch will
+     * fault. This leaves a small coverage hole for non-PMP_L + mml, which is
+     * a restricted use case and can be accepted anyway.
+     */
+    if (0) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..89834d6a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
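+     *
+     * For reference, the legalization expected on the mseccfg write below
+     * (a sketch assuming the Smepmp v1.0 rules, as implemented by the
+     * vendored model) is roughly:
+     *   RLB:  write ignored (stays 0) while any pmpcfg.L bit is set and
+     *         mseccfg.RLB is currently 0;
+     *   MML:  sticky, can be set but not cleared;
+     *   MMWP: sticky, can be set but not cleared.
+     * The expected_val computation below encodes these rules, with the
+     * generator's 0/1 constants folded in.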
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..5bad51a2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..742f2be8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..f248eed0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..aa67ff58 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..056623f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
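+ * - This variant locks pmp2cfg/pmp3cfg (lock_bits below) and sets only MMWP,
+ *   so the later writes to pmpaddr3 and to pmp3cfg are expected to be
+ *   dropped; expected_pmpaddr_fail/expected_pmpcfg_fail are therefore 1.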
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..52484eac --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..c3cc9b88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
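+ * [Editor's note] The pmpaddr CSRs hold byte addresses shifted right by 2,
+ * hence the >> 2 below. With A = TOR an entry i matches
+ * pmpaddr[i-1] << 2 <= addr < pmpaddr[i] << 2, so entry 2 covers
+ * [TEST_MEM_START, TEST_MEM_END) and entry 3 covers [TEST_MEM_END,
+ * U_MEM_END) for the fixed U-mode window.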
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..bd325185 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..2da0f52d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
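+ * [Editor's note] The *_pmp_NN variants are generated from the same
+ * skeleton and differ only in which pmpaddr/pmpcfg index the test target
+ * pokes and which value it XORs into the cfg byte; this variant (pmp_05)
+ * rewrites pmpaddr3 and flips value 5 (PMP_R | PMP_X) into byte 3 of
+ * pmpcfg0, i.e. the locked entry 3.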
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..a0cc8e5d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
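+ * [Editor's note] The cast in lock_bits earlier in this function,
+ * (PMP_L << 16) | ((reg_t)PMP_L << 24), matters because 0x80 << 24 as a
+ * 32-bit int shifts into the sign bit (undefined behaviour, and in
+ * practice it would sign-extend to 0xffffffff80000000 when widened to
+ * reg_t on RV64); the reg_t arithmetic keeps the value at 0x80000000 as
+ * intended.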
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..3ceba7bb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
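+ * [Editor's note] Result reporting is shared by all variants: any trap
+ * lands in handle_trap(), which aborts via tohost_exit(1337); otherwise
+ * checkTestResult() exits with a bitmask (bit 0: wrong mseccfg outcome,
+ * bit 1: wrong pmpaddr outcome, bit 2: wrong pmpcfg outcome), so exit
+ * code 0 means the variant passed.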
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..11a92815 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
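+ * [Editor's note] On RV64 only the even pmpcfg CSRs exist, and pmpcfg2
+ * holds the cfg bytes of entries 8-15. The lock bits written above only
+ * lock entries 2/3 in pmpcfg0, so the pmpaddr8 write and the pmpcfg2
+ * bit flip below target unlocked state and are expected to stick
+ * (expected_pmpaddr_fail = expected_pmpcfg_fail = 0).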
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..67a055c0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
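+ * [Editor's note, speculative] This variant expects the pmpcfg2 write to
+ * fail even though entries 8-15 are unlocked: the test target below XORs
+ * value 2 into byte 3 of pmpcfg2, producing W = 1 with R = 0, a
+ * combination the privileged spec reserves while mseccfg.MML = 0, so the
+ * written byte presumably does not read back (expected_pmpcfg_fail = 1).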
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr9 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (9 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr9, %1 \n"
+                  "\tcsrr %0, pmpaddr9 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr9 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 3
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c
new file mode 100644
index 00000000..d56db180
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr10 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (10 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr10, %1 \n"
+                  "\tcsrr %0, pmpaddr10 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr10 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 3
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c
new file mode 100644
index 00000000..afa0a324
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr11 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (11 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr11, %1 \n"
+                  "\tcsrr %0, pmpaddr11 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr11 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c
new file mode 100644
index 00000000..6abfa044
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr12 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (12 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr12, %1 \n"
+                  "\tcsrr %0, pmpaddr12 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr12 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 3
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c
new file mode 100644
index 00000000..eca54dc9
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr13 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (13 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr13, %1 \n"
+                  "\tcsrr %0, pmpaddr13 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr13 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c
new file mode 100644
index 00000000..2200f195
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr14 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (14 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr14, %1 \n"
+                  "\tcsrr %0, pmpaddr14 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr14 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 0
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c
new file mode 100644
index 00000000..d96c524b
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr10 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (10 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr10, %1 \n"
+                  "\tcsrr %0, pmpaddr10 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr10 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 0
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (0) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (0 ? MSECCFG_RLB : 0)
+         | (0 ? MSECCFG_MML : 0)
+         | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 0)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c
new file mode 100644
index 00000000..e35753ee
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr10 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (10 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr10, %1 \n"
+                  "\tcsrr %0, pmpaddr10 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr10 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing any idx other than 0
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (0 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+        && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c
new file mode 100644
index 00000000..1c8bb0c8
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index like pmpcfg1 for rv64 is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
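+ *
+ * For reference (RISC-V privileged spec): a pmpaddr CSR holds address bits
+ * [XLEN-1:2], hence the ">> 2" below. A TOR entry i matches
+ *   pmpaddr[i-1]*4 <= addr < pmpaddr[i]*4,
+ * so the pmpaddr1/pmpaddr2 pair bounds [TEST_MEM_START, TEST_MEM_END).
+ * A NAPOT entry encodes base and size in one value as
+ *   (base >> 2) | ((size >> 3) - 1);
+ * e.g. (TEST_MEM_START >> 3) - 1 in the M_MODE_RWX build covers
+ * [0, TEST_MEM_START).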
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..c7304b59 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
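+ * (Once a pmpcfg write sets PMP_L while mseccfg.RLB is clear, further
+ * writes to that entry are ignored, so the order of the two writes
+ * matters.)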
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..8fe50a57 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
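+ * - checkTestResult() exits with a bitmask: +1 for a seccfg mismatch, +2
+ *   for pmpaddr, +4 for pmpcfg; an exit code of 0 means the case passed.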
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..9b97a80b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..59a02cd5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
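+ * - Tokens such as "@set_rlb_at_start:int@" appear to be leftover
+ *   substitution markers from the gengen skeleton; the constant conditions
+ *   (e.g. "if (0)", "(1 ? ... : 0)") are the values generated for this
+ *   particular case.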
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
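+ *
+ * Note that PMP_L both locks an entry until reset and makes its rule apply
+ * to M-mode accesses; with mseccfg.RLB set, locked entries can still be
+ * edited.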
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..1d91a947 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..059b0d86 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
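+ * - This case runs with MMWP set: an M-mode access that matches no PMP
+ *   entry is denied rather than allowed by default.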
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..b9d344cb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
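The read-modify-write that follows picks wval so that it remains a legal pmpaddr value under either address-matching mode; a read-back mismatch can then only come from the entry being locked, not from WARL legalization. A minimal sketch of the NAPOT case (the helper name is illustrative, not from the vendored file):

    typedef unsigned long reg_t;

    /* For a NAPOT entry, pmpaddr ends in a run of ones that encodes the
     * region size; extending that run by one bit keeps the value a legal
     * NAPOT mask while doubling the region: 0b..0111 -> 0b..01111.
     * This is exactly the ((rval + 1) << 1) - 1 expression the generated
     * tests use for index 0. */
    static inline reg_t grow_napot(reg_t pmpaddr) {
        return ((pmpaddr + 1) << 1) - 1;
    }

For TOR entries the tests instead shift the old value and add a small offset, which is likewise always a legal address.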
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..285b6a30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
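checkTestResult() folds the three expectation checks into one exit code, one bit per check. A hypothetical host-side decoder, assuming the same bit assignment (this helper is not part of the test):

    #include <stdio.h>

    /* bit 0: seccfg mismatch, bit 1: pmpaddr mismatch, bit 2: pmpcfg mismatch */
    void decode_result(int code) {
        if (code == 0) { printf("all expectations met\n"); return; }
        if (code & 1) printf("seccfg expectation failed\n");
        if (code & 2) printf("pmpaddr expectation failed\n");
        if (code & 4) printf("pmpcfg expectation failed\n");
    }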
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
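The pmpaddr writes that follow shift byte addresses right by PMP_SHIFT (2) because the pmpaddr CSRs hold physical-address bits [XLEN+1:2]. A sketch of the NAPOT encoding, assuming a power-of-two size with a size-aligned base (the helper is illustrative, not from the test):

    typedef unsigned long reg_t;
    #define PMP_SHIFT 2

    /* A NAPOT region of `size` bytes (power of two, >= 8) at a size-aligned
     * base encodes as (base >> 2) with a trailing run of ones selecting the
     * size. For example, napot_encode(0, TEST_MEM_START) equals
     * (TEST_MEM_START >> 3) - 1, the value the M_MODE_RWX branch writes to
     * pmpaddr0. */
    static inline reg_t napot_encode(reg_t base, reg_t size) {
        return (base >> PMP_SHIFT) | ((size >> (PMP_SHIFT + 1)) - 1);
    }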
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..58e24a91 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..c592b7b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
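These tests address mseccfg by its raw CSR number, 0x747, rather than by name, presumably so the code assembles even when the toolchain does not yet know the mseccfg mnemonic. Minimal accessors in the same style (the function names are illustrative, not from the vendored file):

    typedef unsigned long reg_t;

    /* Set bits in mseccfg (CSR 0x747) without clearing the others. */
    static inline void mseccfg_set(reg_t bits) {
        asm volatile ("csrs 0x747, %0" :: "r"(bits) : "memory");
    }

    /* Read back the current mseccfg value. */
    static inline reg_t mseccfg_read(void) {
        reg_t v;
        asm volatile ("csrr %0, 0x747" : "=r"(v));
        return v;
    }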
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..ba4b0128 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
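The byte shifts in the test target below work because, on rv64, each pmpcfg CSR packs eight 8-bit entry configurations: pmpcfg0 holds entries 0 through 7 and pmpcfg2 holds entries 8 through 15 (odd-numbered pmpcfg CSRs exist only on rv32). A sketch with illustrative helper names:

    typedef unsigned long reg_t;

    /* Read the 8-bit config of one entry inside a packed pmpcfg value;
     * slot = entry_index % 8. */
    static inline reg_t pmpcfg_get(reg_t packed, int slot) {
        return (packed >> (slot * 8)) & 0xff;
    }

    /* Rewrite one entry's byte while leaving the other seven untouched,
     * which is why the tests XOR a value shifted by (slot * 8). */
    static inline reg_t pmpcfg_put(reg_t packed, int slot, reg_t cfg) {
        packed &= ~((reg_t)0xff << (slot * 8));
        return packed | ((cfg & 0xff) << (slot * 8));
    }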
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..845853cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..189ba0f3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
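The expected_val bookkeeping in the #else branch below mirrors the Smepmp write rules for mseccfg: MML and MMWP are sticky (set-only), and a write cannot set RLB while RLB is currently clear and any PMP entry is locked. A sketch of that legalization under those assumptions (not code from the test):

    typedef unsigned long reg_t;
    #define MSECCFG_MML  0x1
    #define MSECCFG_MMWP 0x2
    #define MSECCFG_RLB  0x4

    static reg_t legalize_mseccfg(reg_t cur, reg_t wval, int any_entry_locked) {
        reg_t next = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
        next |= cur & (MSECCFG_MML | MSECCFG_MMWP);   /* sticky bits stay set */
        if (!(cur & MSECCFG_RLB) && any_entry_locked)
            next &= ~MSECCFG_RLB;                     /* RLB remains clear */
        return next;
    }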
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..6f78b176 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c
new file mode 100644
index 00000000..81295858
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
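+ *
+ * The probe below is a read / write / read-back sequence; in assembly
+ * terms, roughly:
+ *   csrr t0, pmpaddrN   # capture the current value
+ *   csrw pmpaddrN, t1   # attempt the new value
+ *   csrr t0, pmpaddrN   # observe what actually stuck
+ * A mismatch between the attempted and observed value is recorded in
+ * actual_pmpaddr_fail / actual_pmpcfg_fail.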
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..af1b86ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
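+ *
+ * A note on the probe value (a gloss on the generated code below, not
+ * generator documentation): starting from the reset value rval, the
+ * write value is chosen so it stays legal for either address-matching
+ * mode of the entry:
+ *   wval = ((rval + 1) << 1) - 1;  // entry 0: extend the NAPOT mask by one bit
+ *   wval = (rval << 1) + 65536;    // other entries: still a valid TOR bound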
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
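+ *
+ * pmpcfg packing, for reference: each PMP entry owns one byte of a
+ * pmpcfg register, so on RV64 pmpcfg0 covers entries 0-7 and pmpcfg2
+ * covers entries 8-15. Entry 2 therefore sits at bits [23:16] of pmpcfg0:
+ *   cfg0 |= sub_cfg << 16;  // entry 2 (TEST_MEM)
+ *   cfg0 |= sub_cfg << 24;  // entry 3 (U_MEM)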
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c
new file mode 100644
index 00000000..f5605d06
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
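+ *
+ * The pmpcfg probe below XOR-toggles the low bits of one entry's byte,
+ * leaving the other entries in the register untouched (bits and idx are
+ * editorial stand-ins for the generated literals):
+ *   wval = cfg0 ^ ((reg_t)bits << (idx * 8));
+ * The read-back comparison then shows whether the write took effect.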
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..43363e9d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
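+ *
+ * About the magic CSR number used throughout: 0x747 is mseccfg from the
+ * Smepmp extension. The raw number is likely used because assemblers
+ * without Smepmp support do not know the name; a toolchain that does
+ * could write, equivalently:
+ *   csrw mseccfg, t0   # instead of: csrw 0x747, t0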
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
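+ *
+ * Layout of the cfg1 value built below: byte 1 configures entry 5 (the
+ * execute-only code window) and byte 2 configures entry 6 (the RW data
+ * window):
+ *   cfg1 = ((PMP_R | PMP_W | PMP_TOR) << 16)  // entry 6: data
+ *        | ((PMP_X | PMP_TOR) << 8);          // entry 5: code
+ * On RV64 there is no pmpcfg1, so cfg1 is folded into the upper half of
+ * pmpcfg0 via cfg0 |= cfg1 << 32.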
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 1 || 1)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c
new file mode 100644
index 00000000..b14d755e
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
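+ *
+ * The "memory" clobbers on the csrw/csrr pairs below are what keep the
+ * compiler from caching values or reordering memory accesses across the
+ * CSR writes, e.g.:
+ *   asm volatile ("csrw pmpcfg2, %1 \n\tcsrr %0, pmpcfg2 \n"
+ *                 : "=r"(rval) : "r"(wval) : "memory");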
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..2c48153b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
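+ *
+ * Unlike the pmp_NN variants, this sec_NN variant compiles the
+ * pmpaddr/pmpcfg probe out (note the "#if 0" below) and exercises only
+ * the mseccfg write-then-verify path, again with generated 0/1 selectors:
+ *   wval = (0 ? MSECCFG_RLB : 0)
+ *        | (0 ? MSECCFG_MML : 0)
+ *        | (0 ? MSECCFG_MMWP : 0);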
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
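+ *
+ * Aside on the cast in lock_bits further below: PMP_L is 0x80, so a
+ * plain PMP_L << 24 would be evaluated in (signed) int and shift into
+ * the sign bit; promoting to reg_t first keeps the arithmetic well
+ * defined:
+ *   reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24);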
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0)
+         | (0 ? MSECCFG_MML : 0)
+         | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 1 || 0)
+            && 0 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c
new file mode 100644
index 00000000..edb1c9b2
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
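+ *
+ * Expectation model used below, per Smepmp: mseccfg.MML and mseccfg.MMWP
+ * are sticky (set-only), and a write of 1 to RLB is ignored while RLB is
+ * 0 and locked PMP entries exist. Hence, after the write-then-read:
+ *   expected_val &= ~MSECCFG_RLB;  // the RLB write must not stick
+ *   expected_val |= MSECCFG_MML;   // already set in set_cfg(), stays set
+ *   expected_val |= MSECCFG_MMWP;  // likewise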
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing an idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent of pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal. + * This is a small coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..43bd421e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program verifies PMP CSR access once seccfg is introduced. + * It is expected to execute from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not covered. + * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered. + * - Executed on RV64 only.
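+ * - Exit code (see checkTestResult): 0 on pass; bits 0, 1 and 2 flag seccfg, + * pmpaddr and pmpcfg mismatches respectively.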
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
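+ * With TOR matching, pmpaddr[i] holds a byte address shifted right by PMP_SHIFT (2), + * and entry i covers [pmpaddr[i-1] << 2, pmpaddr[i] << 2); hence the >> 2 on the + * writes below.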
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..b6f4bb5f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
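+ * A PMP entry whose PMP_L bit is set ignores further writes while MSECCFG_RLB is + * clear, so the probes below always compare a read-back value instead of assuming + * the write took effect.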
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..4625c365 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
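+ * - Behaviour assumed from Smepmp: MML and MMWP are sticky once set, and RLB + * cannot be set again while locked entries exist.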
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
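+ * On RV64 pmpcfg0 packs entries 0-7 one byte each (the odd-numbered pmpcfg + * registers do not exist), which is why cfg1 is merged in with << 32 below rather + * than written to pmpcfg1.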
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..40d94bcb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
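+ * The expected_val logic below clears MSECCFG_RLB whenever locked entries were + * programmed earlier, mirroring the rule that a write of RLB = 1 is ignored once + * a locked rule exists.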
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..36443cc8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
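+ * - The sec_NN variants differ only in the 0/1 constants that gen_pmp_test.cc + * substitutes into the skeleton's conditions; the surrounding code is identical.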
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
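+ * When M_MODE_RWX is 0, TOR entries 4..6 below cover the boot image at + * 0x80000000 (execute-only code, then read/write data), locked with PMP_L so that + * M mode keeps access once MML is set.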
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..3d3b9ac9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
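+ * The #if 0 branch below is the generator's alternative probe for pmpaddr/pmpcfg + * write-back; in this variant only the mseccfg probe in the #else branch runs.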
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..16458107 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
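+ * - This variant sets expected_pmpcfg_fail = 1, so it exits 0 only if the pmpcfg + * write-back comparison below actually mismatches.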
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..2b4644e8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
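+ * While MSECCFG_RLB is set, writes to locked PMP entries still take
+ * effect; once RLB is cleared, such writes are ignored, which is what
+ * the write/read-back pairs below are checking for.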
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..7256bcae --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
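+ * - On RV64 each pmpcfg CSR packs eight 8-bit entry configurations, so
+ *   only the even-numbered pmpcfg CSRs exist.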
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
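+ * Note: a TOR entry i matches pmpaddr[i-1]*4 <= addr < pmpaddr[i]*4, so
+ * pmp1cfg can stay OFF and still supply the base address for pmp2cfg.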
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..869d8d73 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
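+ * The PMP CSRs are WARL: a write to a locked entry does not trap, it is
+ * silently dropped, so a failure only shows up in the read-back value.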
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..e599dfb7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
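+ * - mseccfg is CSR 0x747; bit 0 is MML, bit 1 is MMWP, bit 2 is RLB,
+ *   matching the MSECCFG_* macros below.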
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
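+ * Note: with MML clear, the R=0/W=1 permission combination is reserved;
+ * the cfg flip tested below produces exactly that, which appears to be
+ * why expected_pmpcfg_fail is set for this case.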
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..4e76d7fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
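+ * The XOR below flips the W/X bits of entry 2's locked cfg byte into
+ * another legal value; because RLB is still set, the write is expected
+ * to stick and read back unchanged.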
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..c866fc15 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
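+ * - checkTestResult() folds the three mismatch flags into the exit code
+ *   as bits 1 (mseccfg), 2 (pmpaddr) and 4 (pmpcfg).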
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
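+ * Note: in the M_MODE_RWX variant, pmpaddr0 = (TEST_MEM_START >> 3) - 1
+ * is the NAPOT encoding of the region [0, TEST_MEM_START), i.e. size/8 - 1.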
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..a617d748 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
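+ * This variant targets pmpaddr7 and pmpcfg2, neither of which was locked
+ * above, so both writes are expected to read back exactly.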
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..caa0b79c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
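+ * - Any unexpected trap lands in handle_trap(), which reports failure by
+ *   exiting through tohost_exit(1337).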
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently any trap is fatal: it reports via tohost_exit() instead of skipping to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB so that entries are not locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg as the base of the TOR range.
+     * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
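+     *
+     * Sketch of the resulting TOR layout (a TOR entry i matches when
+     * pmpaddr[i-1] <= addr>>2 < pmpaddr[i]): entry 2 covers
+     * [TEST_MEM_START, TEST_MEM_END) and entry 3 covers
+     * [TEST_MEM_END, U_MEM_END).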
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..b04dd83f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently any trap is fatal: it reports via tohost_exit() instead of skipping to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB so that entries are not locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg as the base of the TOR range.
+     * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // the L bit needs to be set for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // cast so the shift is not done in the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Keep the pmp and seccfg accesses separate, since the pmplock_recorded
+     * status may be updated again when pmpcfg is accessed.
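+     *
+     * Reader's note (not generator output): on RV64 each pmpcfg CSR packs
+     * eight 8-bit entry configurations, so the XOR of the form
+     * value << (idx * 8) used below flips bits in entry idx of pmpcfg2 only.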
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..c6ba6450 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
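+ * - Reader's note (not generator output): the exit code is a bitmask of
+ *   mismatches, bit 0 = seccfg, bit 1 = pmpaddr, bit 2 = pmpcfg (see
+ *   checkTestResult()).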
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
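+ * Reader's note (not generator output): pmp1cfg itself stays PMP_OFF; with
+ * TOR its pmpaddr only serves as the base address for entry 2.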
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..68987d05 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
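+ * Reader's note (an assumption about the harness): the handler receives
+ * (cause, epc, regs) and its return value would be the PC to resume at.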
+ * currently any trap is fatal: it reports via tohost_exit() instead of skipping to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB so that entries are not locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg as the base of the TOR range.
+     * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (0) { // the L bit needs to be set for M-mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    if (0 != 1) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // cast so the shift is not done in the (default) int type
+        if (1) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                          :
+                          : "r"(lock_bits)
+                          : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+                               | (0 ? MSECCFG_MML : 0)
+                               | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Keep the pmp and seccfg accesses separate, since the pmplock_recorded
+     * status may be updated again when pmpcfg is accessed.
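+     *
+     * Reader's note on MSECCFG_RLB (Rule Locking Bypass), set above: while
+     * RLB is 1, PMP entries stay writable even with PMP_L set; clearing it
+     * restores the usual lock-until-reset behaviour.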
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..6cf6d876 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..5890323f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
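+ * Reader's note (an assumption about the harness): exit() is the bare-metal
+ * stub from syscalls.c, reporting the code through tohost rather than
+ * returning to a hosted libc.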
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
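+ * Reader's note (not generator output): MSECCFG_MML (Machine Mode Lockdown)
+ * is deliberately 0 in this mml0 variant; setting it changes how PMP_L and
+ * M-mode default access are interpreted, which the mml1 variants exercise.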
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..b3176fb3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..0e6330b9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
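+ * (A pmpcfg write re-records which entries are locked, and that recorded
+ * lock state is what gates the mseccfg write checked below.)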
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..b3a2db24 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
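+ * - mseccfg is read and written by CSR number (0x747) below, since the
+ * assembler may not know the register by name.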
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
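+ * (pmpaddr CSRs hold bits [XLEN+1:2] of the address, hence the ">> 2" in
+ * the writes below; a TOR entry uses the previous pmpaddr as its base.)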
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..540a16d0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
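+ * (Hence the pmp CSRs are programmed first, and mseccfg is only written
+ * once the lock state under test is in place.)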
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..2582c331 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
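+ * - checkTestResult() reports mismatches as exit-code bits: 1 for seccfg,
+ * 2 for pmpaddr, 4 for pmpcfg.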
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
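+ * (The TOR entries below each cover addresses from pmpaddr[i-1] << 2 up
+ * to pmpaddr[i] << 2.)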
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..e2954af3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
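+ * (Presumably this also keeps an mseccfg mismatch from being misattributed
+ * to a side effect of the pmpcfg write.)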
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..7067dae9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
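+ * - printf is only declared when PRINTF_SUPPORTED is set; otherwise the
+ * calls below compile away to nothing.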
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
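+ * (TEST_MEM spans 0x200000-0x240000 and U_MEM extends another 0x10000
+ * above it; see the local-status defines above.)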
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..a8c7ee95 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
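+ * (Writes to locked pmp entries are ignored rather than trapped, so a
+ * combined sequence could silently check the wrong value.)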
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..877e2ac7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
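+ * - Unlike the *_sec_* variants, this *_pmp_* variant exercises the
+ * pmpaddr and pmpcfg read-back path (the "#if 1" arm below).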
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
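+ * (The test body later rewrites pmpaddr3 and pmpcfg0, so the U_MEM entry
+ * set up here is the one under test.)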
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..eac0826d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently any trap simply ends the test via tohost_exit()
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 1) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (1) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg access since pmplock_recorded status may be
+ * updated again when accessing pmpcfg.
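+ *
+ * Every probe below follows the same write-then-read-back pattern: write a
+ * candidate value, read it back, and compare against what a legal (unlocked)
+ * entry should return. A generic sketch of that pattern - the macro is an
+ * editorial assumption, not from the generator:
+ */
+#if 0 /* editorial sketch, not generated code */
+#define CSR_PROBE(csr, out, in) \
+    asm volatile ("csrw " #csr ", %1\n\tcsrr %0, " #csr \
+                  : "=r"(out) : "r"(in) : "memory")
+/* usage: CSR_PROBE(pmpaddr2, rval, wval); if (rval != wval) ...fail... */
+#endif
+/*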
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..feadbcc5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
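+ *
+ * The "valid value for both NAPOT and TOR" arithmetic these probes use:
+ * starting from the current pmpaddr value r, ((r + 1) << 1) - 1 extends a
+ * NAPOT trailing-ones mask by one bit (doubling the region size, since a run
+ * of k trailing ones encodes 2^(k+3) bytes), while (r << 1) is simply another
+ * legal word address for a TOR entry. A worked sketch, assuming r is already
+ * a valid NAPOT encoding:
+ */
+#if 0 /* editorial sketch, not generated code */
+reg_t r = 0x3f;                      /* NAPOT: base 0, 512-byte region    */
+reg_t napot2 = ((r + 1) << 1) - 1;   /* 0x7f: same base, twice the size   */
+reg_t tor2   = (r << 1);             /* plain word address, fine for TOR  */
+#endif
+/*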
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
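+ *
+ * Each PMP entry owns one byte of a pmpcfg register: on RV64 entry i lives in
+ * pmpcfg{2*(i/8)} at byte i%8 (odd pmpcfg registers do not exist on RV64,
+ * hence the cfg1 << 32 merge below). A sketch of the field math, with an
+ * assumed helper name:
+ */
+#if 0 /* editorial sketch, not generated code */
+static inline reg_t cfg_field(unsigned idx, unsigned char cfg) {
+    return (reg_t)cfg << ((idx % 8) * 8);
+}
+/* e.g. cfg0 |= cfg_field(3, PMP_R | PMP_W | PMP_X | PMP_TOR | PMP_L); */
+#endif
+/*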
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..426d572b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
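+ *
+ * What the expected-value logic in the (compiled-out) #else branch below
+ * models, per the Smepmp spec as read here: MML and MMWP are sticky
+ * (writable 0 -> 1 only), and a write that sets RLB is dropped when RLB is
+ * currently 0 and some pmpcfg entry has PMP_L set. Sketch with assumed
+ * predicate names:
+ */
+#if 0 /* editorial sketch, not generated code */
+reg_t want   = MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP;
+reg_t expect = want;
+if (!rlb_was_set && some_entry_locked)  /* predicate names assumed */
+    expect &= ~MSECCFG_RLB;             /* RLB write silently ignored */
+#endif
+/*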
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..0b79dcca --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
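+ *
+ * PMP_L normally freezes an entry until reset; mseccfg.RLB (Rule Locking
+ * Bypass) is what lets these tests keep rewriting locked entries. A sketch of
+ * toggling it (0x747 is the mseccfg CSR number):
+ */
+#if 0 /* editorial sketch, not generated code */
+asm volatile ("csrs 0x747, %0" :: "r"((reg_t)MSECCFG_RLB)); /* bypass locks */
+asm volatile ("csrc 0x747, %0" :: "r"((reg_t)MSECCFG_RLB)); /* re-arm locks */
+#endif
+/*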
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
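+ *
+ * The M_MODE_RWX knob picks between two layouts for the M-mode image: one
+ * NAPOT entry carrying R|W|X, or - as compiled in these variants - split TOR
+ * entries, X-only for code and R|W for data, which is the separation that
+ * MML-style policies favor. Rough shape of the two choices (editorial
+ * reading of the #if/#else below):
+ */
+#if 0 /* editorial sketch, not generated code */
+cfg0 = PMP_R | PMP_W | PMP_X | PMP_NAPOT | PMP_L;    /* single entry      */
+cfg1 = ((PMP_X | PMP_TOR | PMP_L) << 8)              /* code entry (idx 5) */
+     | ((PMP_R | PMP_W | PMP_TOR | PMP_L) << 16);    /* data entry (idx 6) */
+#endif
+/*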
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..cbbad4fc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
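+ *
+ * The pmpcfg probe below perturbs exactly one entry: XOR-ing a small constant
+ * shifted into that entry's byte flips a few permission bits while leaving
+ * the other seven entries' bytes untouched. Sketch:
+ */
+#if 0 /* editorial sketch, not generated code */
+reg_t probe = cur ^ ((reg_t)k << (idx * 8)); /* k in 1..7 varies per variant */
+#endif
+/*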
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..134ab064 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
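+ *
+ * mseccfg is addressed numerically as 0x747 throughout these tests,
+ * presumably so the code assembles even with toolchains that predate Smepmp
+ * support. Equivalent forms (the named variant assumes Smepmp-aware
+ * binutils):
+ */
+#if 0 /* editorial sketch, not generated code */
+asm volatile ("csrw 0x747, %0" :: "r"(v));    /* always assembles           */
+asm volatile ("csrw mseccfg, %0" :: "r"(v));  /* needs a newer assembler    */
+#endif
+/*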
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
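+ *
+ * The fixed map shared by every generated variant, straight from the
+ * constants above: TEST_MEM spans 0x200000-0x240000, the U-mode region
+ * extends to U_MEM_END (0x250000), and FAKE_ADDRESS (0x10000000) is an
+ * intentionally unmapped probe target. Sanity check of that reading:
+ */
+#if 0 /* editorial sketch, not generated code */
+_Static_assert(U_MEM_END == 0x250000, "TEST_MEM_END + 0x10000");
+#endif
+/*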
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..a90342ff --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
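+ *
+ * This variant probes pmpaddr11, whose config byte sits in pmpcfg2 (RV64:
+ * pmpcfg{2*(i/8)}) at byte i%8 = 3 - matching the (3 * 8) shift in the
+ * pmpcfg2 XOR below. Index math sketch:
+ */
+#if 0 /* editorial sketch, not generated code */
+unsigned reg_no = 2 * (11 / 8);  /* = 2  -> pmpcfg2              */
+unsigned shift  = (11 % 8) * 8;  /* = 24 -> entry 11's cfg byte  */
+#endif
+/*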
+ */
+    reg_t wval = 0, rval;
+#if 1
+    asm volatile ("csrr %0, pmpaddr11 \n"
+            : "=r"(rval));
+    // pick a value that is valid for both NAPOT and TOR
+    if (11 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr11, %1 \n"
+            "\tcsrr %0, pmpaddr11 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpaddr11 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // re-read pmpcfg2 into cfg0 so that only idx 3 is changed below
+    asm volatile ("csrr %0, pmpcfg2 \n"
+            : "=r"(cfg0)
+            :
+            : "memory");
+
+    // reuse lock_once here since it applies to RLB and is independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+            "\tcsrr %0, pmpcfg2 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * PMP_L needs to be set for cfg0, otherwise the next instruction
+     * fetch becomes illegal. This is a small coverage hole for
+     * non-PMP_L + mml, which should be a restricted use case and can be
+     * accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+            "\tcsrr %0, 0x747 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 is locked
+     * pmp_lock means cfg2/3 are locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((1 || 1 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assumed to be entered in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c
new file mode 100644
index 00000000..a7d380aa
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies pmp CSR access once seccfg is introduced.
+ * It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, like pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
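+ * - The file name encodes the generator parameters: rlb1/mmwp0/mml1 are
+ *   the mseccfg bits that set_cfg() writes, pmp_12 is the PMP CSR index
+ *   probed by the test target, and lock10 selects the generator's lock
+ *   scenario.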
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether r/w/x share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c:
+ * currently any trap simply ends the run via tohost_exit
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+    /*
+     * set MSECCFG_RLB to avoid entries being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
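+ *
+ * pmpaddrN holds bits [XLEN+1:2] of the byte address, hence the ">> 2"
+ * below; a TOR entry N then covers [pmpaddr(N-1) << 2, pmpaddrN << 2).
+ * For instance, entry 2 covers exactly TEST_MEM:
+ *
+ *     csrw pmpaddr1, TEST_MEM_START >> 2   // TOR base
+ *     csrw pmpaddr2, TEST_MEM_END >> 2     // TOR top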
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..6fd62593 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
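+ *
+ * The probe below is a plain write-then-read-back check: with
+ * MSECCFG_RLB set even a locked entry must accept the write, so any
+ * wval/rval mismatch counts as a failure. The same pattern as a
+ * hypothetical stand-alone helper (not part of this skeleton):
+ *
+ *     #define CSR_PROBE(csr, wv, rv) \
+ *         asm volatile ("csrw " csr ", %1\n\tcsrr %0, " csr \
+ *                 : "=r"(rv) : "r"(wv) : "memory")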
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..70d89528 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
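+ * - A nonzero exit status is a bitmask of what mismatched: bit 0 for
+ *   seccfg, bit 1 for pmpaddr, bit 2 for pmpcfg (see checkTestResult());
+ *   e.g. exit(5) means the seccfg and pmpcfg probes both failed.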
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
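+ *
+ * The M_MODE_RWX branch uses a NAPOT entry instead of TOR: a naturally
+ * aligned power-of-two region of size S at base B is encoded as
+ * pmpaddr = (B >> 2) + (S >> 3) - 1. With B = 0 and S = TEST_MEM_START
+ * that gives exactly the value written to pmpaddr0 below:
+ *
+ *     reg_t napot = (TEST_MEM_START >> 3) - 1;   // covers [0, TEST_MEM_START)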
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..a27075f9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
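+ *
+ * The "valid value" chosen below works for either address mode: entry 0
+ * resets to all ones (a NAPOT mask), and ((rval + 1) << 1) - 1 extends
+ * such a mask by one trailing one bit, doubling the region; for any
+ * other entry (rval << 1) + 65536 is simply a different well-formed
+ * address:
+ *
+ *     wval = ((rval + 1) << 1) - 1;   // NAPOT: one more trailing 1
+ *     wval = (rval << 1) + 65536;     // TOR: just move the bound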
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..94bb6514 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
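+ *
+ * Each PMP entry is configured by one byte laid out as L 0 0 A A X W R
+ * (PMP_L = 0x80, PMP_A = 0x18, X/W/R = 0x04/0x02/0x01). PMP_L normally
+ * locks the entry until reset and applies it to M mode as well; under
+ * MML it selects the M-mode-only rule set instead, and under RLB locked
+ * entries stay writable. The cast used below matters, because
+ * PMP_L << 24 would overflow a plain int:
+ *
+ *     reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24);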
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..485e0bbb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
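+ *
+ * The pmpcfg probe below flips bits with XOR so that every byte of
+ * pmpcfg2 except the one under test keeps its value. On rv64 pmpcfg2
+ * packs entries 8..15, one byte each, so toggling value v in entry
+ * (8 + i) is:
+ *
+ *     wval = cfg ^ ((reg_t)v << (i * 8));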
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..10e94e47 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
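+ *
+ * On rv64 only the even pmpcfg CSRs exist: pmpcfg0 holds the config
+ * bytes of entries 0..7, so the value that rv32 would write to pmpcfg1
+ * is merged into the upper half instead:
+ *
+ *     cfg0 |= (cfg1 << 32);   // entries 4..7 live in the upper 32 bits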
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((1 || 1 || 0)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c
new file mode 100644
index 00000000..2dada932
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skips to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM. Both test code and data share the same PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // need to set the L bit for M mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 1) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (1) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+ * updated again when accessing pmpcfg.
+ */
+ reg_t wval = 0, rval;
+#if 0
+ asm volatile ("csrr %0, pmpaddr13 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (13 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 65536;
+ }
+ asm volatile ("csrw pmpaddr13, %1 \n"
+ "\tcsrr %0, pmpaddr13 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr13 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing an idx other than 1
+ asm volatile ("csrr %0, pmpcfg2 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8));
+ asm volatile ("csrw pmpcfg2, %1 \n"
+ "\tcsrr %0, pmpcfg2 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+ * This is a small coverage hole for non-PMP_L + mml, which should be
+ * a restricted use case and can be accepted anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (0 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (0 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 locked
+ * pmp_lock means cfg2/3 locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((1 || 1 || 1)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (0) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c
new file mode 100644
index 00000000..1a9d7813
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..0bbb5864 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..2b848f27 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..e3609e36 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..884fc155 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..c4b265cf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..65f38596 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 1;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently it simply exits via tohost_exit instead of skipping onward
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * set MSECCFG_RLB to avoid being locked at start
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg as the base of a TOR range.
+ * Then use pmp2cfg for TEST_MEM. Test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U-mode memory (U_MEM).
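+ * (pmpaddrN holds the physical address shifted right by PMP_SHIFT, and a
+ * TOR entry covers addresses from the previous pmpaddr up to its own.)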
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (0) { // the L bit must be set for M-mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 1) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+ if (1) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (0 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * PMP and seccfg accesses need to be separated, since the pmplock_recorded
+ * status may be updated again when pmpcfg is accessed.
+ */
+ reg_t wval = 0, rval;
+#if 1
+ asm volatile ("csrr %0, pmpaddr3 \n"
+ : "=r"(rval));
+ // give a valid value for both NAPOT and TOR
+ if (3 == 0) {
+ wval = ((rval + 1) << 1) - 1; // NAPOT mask
+ } else {
+ wval = (rval << 1) + 0;
+ }
+ asm volatile ("csrw pmpaddr3, %1 \n"
+ "\tcsrr %0, pmpaddr3 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpaddr3 expects %lx vs %lx\n", wval, rval);
+ actual_pmpaddr_fail = 1;
+ }
+
+ // Update cfg0 to avoid changing any idx other than 3
+ asm volatile ("csrr %0, pmpcfg0 \n"
+ : "=r"(cfg0)
+ :
+ : "memory");
+
+ // reuse lock_once here since it's for RLB and independent of pmp_lock
+ wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8));
+ asm volatile ("csrw pmpcfg0, %1 \n"
+ "\tcsrr %0, pmpcfg0 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ if (wval != rval) {
+ printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+ actual_pmpcfg_fail = 1;
+ }
+#else
+ /*
+ * PMP_L needs to be set for cfg0, otherwise the next PC will be illegal.
+ * This leaves a small coverage hole for non-PMP_L + MML, which should be
+ * a restricted use case and is acceptable anyway.
+ */
+ if (1) {
+#if M_MODE_RWX
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+ asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+ asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+ }
+
+ wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..dc490e83 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
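+ * (any trap taken during the test is unexpected and aborts immediately
+ * with the distinctive exit code 1337)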
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
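+ * (for a NAPOT entry the trailing one bits of pmpaddr encode the region
+ * size, hence the trailing-ones mask in the index-0 branch below)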
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..3cb091e1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
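+ * - In mseccfg, RLB is Rule Locking Bypass, MML is Machine Mode Lockdown,
+ *   and MMWP is Machine Mode Whitelist Policy.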
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
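+ * (on RV64 only even-numbered pmpcfg CSRs exist, each packing eight 8-bit
+ * entry configurations, which is why cfg1 is merged into cfg0 below)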
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..a22ddba4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
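+ * (this variant expects no trap at all, so the handler below treats every
+ * trap as fatal)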
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
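+ * (the write-then-read-back sequences below check whether a write to a
+ * possibly locked WARL CSR actually took effect)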
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..a6fb16f3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
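+ * - Tokens such as @set_rlb_at_start:int@ in the code below are
+ *   substitution markers left by the gengen generator.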
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
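+ * (PMP_L both locks an entry against further modification and makes the
+ * entry apply to M-mode accesses)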
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..cc8de44c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
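+ * (exit code 1337 distinguishes an unexpected trap from the bitmask codes
+ * produced by checkTestResult)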
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
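+ * (while MSECCFG_RLB is set, even entries with PMP_L set stay writable,
+ * which the locked-entry rewrites in this test rely on)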
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..dae9ad9d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
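+ * - With MMWP set, M-mode accesses that match no PMP entry are denied
+ *   instead of being allowed by default.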
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
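+ * (without M_MODE_RWX, separate TOR entries at pmpaddr4..pmpaddr6 carve
+ * out code and data windows for the M-mode image based at 0x80000000)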
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..12a205af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
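+ * (handle_trap is the hook called from the common trap entry provided by
+ * syscalls.c)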
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..8dc34792 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..bfa666d6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..5c7e3b70 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..feaaa5f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..457e54b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..f1f1abf8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..e5ac0d62 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..e9d47ae2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
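+ *
+ * (Editorial note, not part of the generated skeleton: every check below is
+ * a write-then-read-back pair, csrw followed by csrr on the same CSR, so a
+ * field the hardware refuses to update, e.g. a locked pmpcfg byte, shows up
+ * as wval != rval. With MML/MMWP/RLB at bits 0/1/2 of mseccfg, a write of
+ * MSECCFG_RLB | MSECCFG_MMWP that is fully accepted reads back as 0x6.)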
+ */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr8 \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (8 == 0) {
+        wval = ((rval + 1) << 1) - 1;   // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr8, %1 \n"
+                  "\tcsrr %0, pmpaddr8 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr8 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Re-read pmpcfg2 into cfg0 so the write below changes no entry other than idx 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+                  "\tcsrr %0, pmpcfg2 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This leaves a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (0 ? MSECCFG_RLB : 0)
+         | (1 ? MSECCFG_MML : 0)
+         | (0 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c
new file mode 100644
index 00000000..b5d26d1d
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when mseccfg (seccfg) is introduced.
+ * It is expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
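+ *
+ * (Editorial note, not part of the generated skeleton: the csrs/csrc pair
+ * further down toggles only the PMP_L bits of entries 2 and 3, i.e.
+ * (PMP_L << 16) | (PMP_L << 24); csrs sets the masked bits and csrc clears
+ * them, without rewriting the rest of pmpcfg0.)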
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..43cb37ac --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
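+ *
+ * (Editorial note, not part of the generated skeleton: 0x747 is the CSR
+ * address of mseccfg in the Smepmp proposal; the numeric form is used,
+ * presumably because assemblers of the time did not yet know the name. The
+ * RV32-only upper half, mseccfgh at 0x757, is never touched by these
+ * RV64-only tests.)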
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..b0509ae9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
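+ *
+ * (Editorial note, not part of the generated skeleton: on RV64 only the
+ * even-numbered pmpcfg CSRs exist and each packs eight 8-bit entry configs,
+ * which is why cfg1, covering entries 4-6, is folded in below as
+ * cfg0 |= cfg1 << 32 rather than written to a separate pmpcfg1.)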
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..b60a34d5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
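+ *
+ * (Editorial note, not part of the generated skeleton: PMP_L normally makes
+ * an entry immutable until reset; with mseccfg.RLB (Rule Locking Bypass)
+ * set, locked entries stay writable. The expected_val adjustments below
+ * model this: once rules are locked and RLB has been dropped, attempts to
+ * set RLB again are expected to be ignored.)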
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..668ef74d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
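+ *
+ * (Editorial note, not part of the generated skeleton: in the M_MODE_RWX
+ * variant, compiled out here, the single M-mode entry uses NAPOT encoding:
+ * pmpaddr0 = (TEST_MEM_START >> 3) - 1 = 0x3ffff has 18 trailing one bits
+ * and so encodes the naturally aligned 2^21-byte region [0, 0x200000).)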
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..68808402 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
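+ *
+ * (Editorial note, not part of the generated skeleton: this variant writes
+ * all three bits at once, MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP = 0x7.
+ * Since RLB is set at start and never dropped, all three are expected to
+ * read back; any mismatch is reported through checkTestResult()'s bitmask
+ * exit code: +1 seccfg, +2 pmpaddr, +4 pmpcfg.)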
+     */
+    reg_t wval = 0, rval;
+#if 0
+    asm volatile ("csrr %0, pmpaddr8 \n"
+            : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (8 == 0) {
+        wval = ((rval + 1) << 1) - 1; // NAPOT mask
+    } else {
+        wval = (rval << 1) + 65536;
+    }
+    asm volatile ("csrw pmpaddr8, %1 \n"
+            "\tcsrr %0, pmpaddr8 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpaddr8 expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than 1
+    asm volatile ("csrr %0, pmpcfg2 \n"
+            : "=r"(cfg0)
+            :
+            : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8));
+    asm volatile ("csrw pmpcfg2, %1 \n"
+            "\tcsrr %0, pmpcfg2 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * Need to set PMP_L for cfg0, otherwise the next PC will be illegal.
+     * This is a small coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (1) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (1 ? MSECCFG_RLB : 0)
+            | (1 ? MSECCFG_MML : 0)
+            | (1 ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+            "\tcsrr %0, 0x747 \n"
+            : "=r"(rval)
+            : "r"(wval)
+            : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((0 || 1 || 1)
+            && 1 == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (0) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (1) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c
new file mode 100644
index 00000000..724a844f
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access once seccfg is introduced.
+ * It is expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
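+ * A note on the encoding below: each pmpaddrN CSR holds physical-address bits
+ * [XLEN+1:2], which is why every boundary is written shifted right by 2; a
+ * TOR entry N then matches addresses with pmpaddr[N-1] <= (addr >> 2) < pmpaddr[N].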
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..c735a6bf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
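+ * For the pmpaddr readback below: index 0 would use ((rval + 1) << 1) - 1 to
+ * keep a well-formed NAPOT trailing-ones mask, while the TOR entry tested here
+ * just shifts the old boundary left, which remains a legal TOR value.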
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..7a55c927 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
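+ * On RV64 only even-numbered pmpcfg CSRs exist and pmpcfg0 packs eight 8-bit
+ * entries, so the cfg1 bytes built below are merged into the upper half of
+ * cfg0 (cfg0 |= cfg1 << 32) rather than written to a separate pmpcfg1.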
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..bf737438 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
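+ * No trap is expected in this variant (all expected_*_fail flags above are 0),
+ * so reaching handle_trap() below indicates a broken setup rather than a
+ * legitimate test outcome.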
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
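+ * The pmpcfg check below XORs a small R/W/X pattern into a single pmpNcfg
+ * byte and reads it back; because MSECCFG_RLB is set, the write is expected
+ * to take effect even though the entry carries PMP_L.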
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..081704c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
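+ * - All expected_*_fail counters are 0 in this variant: with RLB set, every
+ *   CSR update in the test body should take effect.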
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
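+ * In the !M_MODE_RWX path, pmpaddr4..6 add two more TOR regions for M-mode
+ * code and data: entry 5 (PMP_X | PMP_TOR) covers [0x80000000, 0x80010000)
+ * for code, and entry 6 (PMP_R | PMP_W | PMP_TOR) covers from 0x80010000 up
+ * to TEST_MEM_START for data.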
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..0cc2b268 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
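+ * MSECCFG_RLB (Rule Locking Bypass) is what keeps the locked entries above
+ * editable: per Smepmp, while RLB is set, PMP_L rules may still be modified,
+ * and once RLB is cleared with locked rules present it cannot be set again.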
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..90a557fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
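+ * TEST_MEM and U_MEM share the same sub_cfg byte (R/W/X, TOR, plus PMP_L in
+ * this variant); it is placed at cfg0 bits 16..23 for entry 2 and bits 24..31
+ * for entry 3 further below.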
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %1 \n"
+ "\tcsrr %0, 0x747 \n"
+ : "=r"(rval)
+ : "r"(wval)
+ : "memory");
+ reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ /*
+ * pre_sec_mml means cfg0 is locked
+ * pmp_lock means cfg2/3 are locked
+ * sec_mml is the test coverage hole just mentioned
+ */
+ if ((1 || 1 || 1)
+ && 1 == 0) {
+ expected_val &= ~MSECCFG_RLB;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MML;
+ }
+ if (1) {
+ expected_val |= MSECCFG_MMWP;
+ }
+
+ if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_seccfg_fail != actual_seccfg_fail) {
+ ret += 1;
+ }
+
+ if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+ ret += 2;
+ }
+
+ if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+ ret += 4;
+ }
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ checkTestResult();
+ return 0; // assert 0
+}
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c
new file mode 100644
index 00000000..21cba2cc
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c
@@ -0,0 +1,314 @@
+
+/*
+ * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access when seccfg is introduced.
+ * It's expected to be executed from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access on an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = 0;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = 0;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = 0;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * Overrides syscalls.c.
+ * Currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @set_rlb_at_start:int@
+ /*
+ * Set MSECCFG_RLB to avoid being locked at start.
+ */
+ asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+ asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ */
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ if (1) { // need to set the L bit for M-mode code access
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0);
+ cfg0 |= sub_cfg << 24; // for U_MEM
+ cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ if (0 != 1) {
+ reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+ if (1) {
+ asm volatile ("csrs pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ } else {
+ asm volatile ("csrc pmpcfg0, %0 \n"
+ :
+ : "r"(lock_bits)
+ : "memory");
+ }
+ }
+
+ // set proc->state.mseccfg
+ const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0)
+ | (1 ? MSECCFG_MML : 0)
+ | (1 ? MSECCFG_MMWP : 0);
+ asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+ asm volatile ("nop");
+ /*
+ * PMP and seccfg accesses need to be separated, since the pmplock_recorded
+ * status may be updated again when accessing pmpcfg.
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..65729b6e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..60289aa1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..cff8fd5e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..c19310c3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..7b738f46 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..9fdbd482 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..a63284d6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
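+ * pmpaddrN holds the region address shifted right by 2 (a 4-byte
+ * grain), hence the ">> 2" writes below. As an illustrative sketch
+ * (not part of the generated template), a NAPOT entry for a
+ * 2^k-byte region at base would be encoded as
+ *   pmpaddr = (base >> 2) | ((1UL << (k - 3)) - 1);
+ * while TOR entries take the plain (addr >> 2) bounds used here.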
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..4dc682a9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
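+ * The checks below use a write-then-read-back pattern: wval goes in
+ * via csrw and comes straight back via csrr, so any WARL filtering or
+ * lock refusal by the implementation shows up as rval != wval.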
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..c8911b88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
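+ * - On mismatch, checkTestResult() exits with a bitmask: bit 0 for
+ *   seccfg, bit 1 for pmpaddr and bit 2 for pmpcfg differences.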
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
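+ * Note: once MSECCFG_MML is set, PMP_L marks an entry as an M-mode
+ * rule rather than merely locking it, and M-mode may only execute
+ * from regions granted by such rules; hence the L bit on the code
+ * range below.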
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..e4e53b44 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
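+ * expected_val below models the WARL outcome: it starts from the bits
+ * actually written and is then adjusted for bits the implementation
+ * is expected to retain or discard.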
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..6e95293d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
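+ * - The sibling sec_NN variants come from the same skeleton and differ
+ *   only in the 0/1 constants substituted for the @...@ template
+ *   parameters.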
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..f62b917e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
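+ * MSECCFG_RLB (rule locking bypass) is what permits rewriting entries
+ * whose PMP_L bit is set; once RLB is cleared while locked rules are
+ * present, it can no longer be set again.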
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..c54a6c20 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..f6b01623 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
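+ * For reference: a NAPOT pmpaddr with k-3 trailing ones maps a 2^k-byte
+ * region, so the "NAPOT mask" expression below, ((rval + 1) << 1) - 1,
+ * grows a region by appending one more trailing 1.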
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..40b3fd1e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. 
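+ * (the trailing u/rw/x/l/match/mmwp/mml fields of the file name mirror
+ * the generator knobs that produced this variant)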
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* 
+ * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. + */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? 
MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..f3e37719 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
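+// printf calls compile to nothing here; failures still reach the host
+// via tohost_exit()/exit()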
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
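+ * For TOR, entry i matches addresses a with
+ * pmpaddr[i-1] <= (a >> 2) < pmpaddr[i], so pmpaddr1/pmpaddr2 bound
+ * TEST_MEM and the next entry runs up to U_MEM_END.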
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
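+ * FAKE_ADDRESS matches no PMP entry, so when this runs in U mode the
+ * store faults and handle_trap(), seeing mtval == FAKE_ADDRESS, ends
+ * the test.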
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..77081a0e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
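+ * (note: returning epc + 4 assumes the faulting access used an
+ * uncompressed, 4-byte instruction)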
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
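+ * In this variant the 256-byte offset is widened below to at least the
+ * detected PMP granularity, so the injected mismatch survives grain
+ * truncation.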
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
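+ * (unreachable while switch_mode() is compiled out; kept for the
+ * U-mode variants of this generated test)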
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..94d25a03 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
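+ * a trap with epc inside TEST_MEM means the test fetch itself faulted,
+ * which the first branch below records via actual_x_fail;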
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
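+ * detect_pmp_granularity() above uses the standard probe: write
+ * all-ones to pmpaddr0, count the low bits that read back as zero (G),
+ * and the grain is 2^(G+2) bytes.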
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
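+ * (with MML set, printf is unusable from U mode, so this faulting
+ * store serves as the exit path back to M mode)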
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..3d8a4fdc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
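+ * any trap this test does not anticipate falls through to
+ * tohost_exit(1337);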
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
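+ * Here the TEST_MEM entry is configured as TOR with no R/W/X; since
+ * mseccfg stays clear and the entry is unlocked, M-mode accesses are
+ * still allowed and no failure is expected.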
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
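+ * (switch_to_U() parks sp at U_MEM_END, so the U-mode stack also falls
+ * inside the pmp3 TOR region)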
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..74c38d3d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
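+ * cause values 0x5/0x7 tested below are the load/store access-fault
+ * codes (CAUSE_LOAD_ACCESS/CAUSE_STORE_ACCESS defined above);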
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
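+ * 0x747 is the CSR number of mseccfg from the Smepmp extension; RLB is
+ * set first so entries locked by an earlier configuration can still be
+ * rewritten.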
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..a4bffc5b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
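+ * (overriding presumably replaces the default abort-on-trap behavior;
+ * here the handler returns the PC at which execution should resume)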
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
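+ *
+ * Note: 0x747 above is the CSR number of mseccfg (Smepmp); it is written
+ * by number, presumably because older assemblers do not know the name.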
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..1e188a5f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..e6a859ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
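+ *
+ * In this M_MODE_RWX build, pmpaddr0 = (TEST_MEM_START >> 3) - 1 = 0x3ffff
+ * is the NAPOT encoding of [0x0, TEST_MEM_START): 18 trailing ones give a
+ * region of 2^(18+3) = 0x200000 bytes based at address 0.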
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..34c5570d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..78cf5823 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..73ab6e3b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..af2e88fd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */
+
+ unsigned int mismatch_offset = 0;
+
+ if (mismatch_offset != 0x0){
+ volatile int pmp_granularity = detect_pmp_granularity();
+ mismatch_offset = mismatch_addr_offset(pmp_granularity);
+ }
+
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ // Only true for Spike
+// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+// exit(cfg0);
+// }
+
+ if (0) { // need to set the L bit for M-mode code such as the trap handler
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM
+#if 1
+ cfg0 |= ( (0 ? PMP_R : 0)
+ | (0 ? PMP_W : 0)
+ | (0 ? PMP_X : 0)
+ | PMP_TOR | (1 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // set proc->state.mseccfg for MML/MMWP
+ const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0);
+ if (seccfg_bits) {
+ asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+ }
+
+ // currently a no-op, since Spike flushes its TLB when mseccfg is written
+ asm volatile ("fence.i \n");
+}
+
+// from pmp_ok()'s point of view, W/R/X are handled similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ // the initial bytes 1..8 read little-endian as 0x0807060504030201
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_rw_fail = 1;
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // cleared inside target_foo()
+ target_foo();
+#endif
}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * Switch back to M mode by triggering a write access fault on a special address.
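+ * FAKE_ADDRESS is matched by none of the PMP entries set up above, so the
+ * U-mode store below traps with CAUSE_STORE_ACCESS; handle_trap() then sees
+ * mtval == FAKE_ADDRESS and reports the results via checkTestResult().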
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..c84cbfea --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
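+ *
+ * In this MML variant the generator emits "if (1)" below to set the L bit
+ * on the M-mode code/data entries: once MSECCFG_MML is set, an M-mode
+ * access that matches a non-locked rule is denied, so the regions holding
+ * the trap handler must be covered by locked rules.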
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..1068f701 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
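+ * This mmwp1 variant turns on MSECCFG_MMWP (machine-mode whitelist
+ * policy): M-mode accesses that match no PMP entry are then denied rather
+ * than allowed by default.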
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
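+ *
+ * In the M_MODE_RWX branch below, pmpaddr0 = (TEST_MEM_START >> 3) - 1 is
+ * the NAPOT encoding of the region [0, TEST_MEM_START): a base of 0 with a
+ * trailing run of ones selecting the power-of-two size.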
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
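+ * In these u0 variants switch_mode() has an empty body ("#if 0"), so this
+ * U-mode path is never actually reached; it is presumably kept so the
+ * generated skeleton stays identical across variants.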
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..f7ed3ac5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
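+ *
+ * Note the ordering in this function: RLB is set first (above) so locked
+ * entries can be rewritten, the pmpaddr/pmpcfg registers are programmed
+ * next, and the MML/MMWP policy bits are switched on last, once every
+ * rule is in place.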
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..f7030806 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
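+ * In this match0 variant the cfg bits for the TEST_MEM entry are compiled
+ * out ("#if 0" below), so no rule covers TEST_MEM; with neither MML nor
+ * MMWP set, M-mode accesses fall through to the default-allow behaviour,
+ * which is why expected_rw_fail and expected_x_fail are both 0 above.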
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..19f00239 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
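+ *
+ * Here the generated mismatch_offset starts at 256 bytes and is widened by
+ * mismatch_addr_offset() to at least the detected PMP granularity; adding
+ * it to pmpaddr1 below raises the base of the TOR entry, so the start of
+ * TEST_MEM no longer falls inside the entry that was meant to cover it.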
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..8bd7f966 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
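+ *
+ * detect_pmp_granularity() above uses the usual probe: write all-ones to
+ * pmpaddr0 and read it back; the low bits that the hardware forces to zero
+ * reveal the granule, and the code computes it as
+ * 1 << (2 + index of the lowest set bit).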
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..539b9c88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently we simply skip to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+ asm volatile ("nop");
+ actual_x_fail = 1;
+ checkTestResult();
+ } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+ reg_t addr;
+ asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+ if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+ actual_rw_fail = 1;
+ return epc + 4;
+ }
+
+ if (addr == FAKE_ADDRESS) {
+ asm volatile ("nop");
+ asm volatile ("nop");
+ checkTestResult();
+ }
+ }
+
+ printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+ tohost_exit(1337);
+}
+
+
+// switch (mret) to U mode and resume at the next PC
+static void switch_to_U() {
+ reg_t tmp;
+ asm volatile (
+ "li %0, %1\n"
+ "\tcsrc mstatus, %0\n" /* clear MSTATUS.MPP so mret enters U mode */
+ "\tla %0, try_access_umode \n"
+ "\tcsrw mepc, %0\n"
+ "\tli sp, %2\n"
+ "\tmret\n"
+ : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if 0
+ switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+ asm volatile ("nop");
+
+ actual_x_fail = 0;
+}
+
+/*
+ * avoid accessing actual_x_fail, which lies in M-mode memory
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+static int detect_pmp_granularity(){
+ unsigned int granule;
+ unsigned long int temp_reg;
+ unsigned long int all_ones = ~0x0UL;
+
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+ asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+ int g = 2;
+ for(uintptr_t i = 1; i; i<<=1) {
+ if((temp_reg & i) != 0)
+ break;
+ g++;
+ }
+ granule = 1UL << g;
+
+ return granule;
+}
+
+static int mismatch_addr_offset(int granule_size){
+ unsigned int addr_offset = 256;
+
+ if (addr_offset == 0x0){
+ return 0x0;
+ }
+ else {
+ unsigned int mismatch_offset = granule_size;
+ while (mismatch_offset < addr_offset){
+ mismatch_offset = mismatch_offset << 0x1;
+ }
+ return mismatch_offset;
+ }
+}
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+ /*
+ * set MSECCFG_RLB so that existing entries are not locked
+ */
+ unsigned rlb_value = MSECCFG_RLB;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of the TOR range.
+ * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ *
+ * Here @pmp_addr_offset:int@ creates an address mismatch,
+ * and @create_pmp_cfg:int@ creates a cfg mismatch.
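+ *
+ * For reference, the TOR arithmetic used below: pmpaddr CSRs hold
+ * (address >> 2), so TEST_MEM_START 0x200000 is written as 0x80000 and
+ * TEST_MEM_END 0x240000 as 0x90000; a TOR entry i then matches addresses
+ * with pmpaddr[i-1] << 2 <= addr < pmpaddr[i] << 2.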
+ */
+
+ unsigned int mismatch_offset = 256;
+
+ if (mismatch_offset != 0x0){
+ volatile int pmp_granularity = detect_pmp_granularity();
+ mismatch_offset = mismatch_addr_offset(pmp_granularity);
+ }
+
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ // Only true for Spike
+// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+// exit(cfg0);
+// }
+
+ if (1) { // set the L bit for M-mode code such as the trap handler
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM
+#if 0
+ cfg0 |= ( (0 ? PMP_R : 0)
+ | (0 ? PMP_W : 0)
+ | (1 ? PMP_X : 0)
+ | PMP_TOR | (0 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // set proc->state.mseccfg (MML/MMWP bits)
+ const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0);
+ if (seccfg_bits) {
+ asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+ }
+
+ // currently a no-op: the TLB is already flushed when mseccfg is written via set_csr()
+ asm volatile ("fence.i \n");
+}
+
+// from the pmp_ok() perspective, W/R/X behave similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_rw_fail = 1;
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // cleared inside target_foo()
+ target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * switch back to M mode by triggering a write access fault on a special address.
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_rw_fail != actual_rw_fail) {
+ ret += 1;
+ }
+
+ if (expected_x_fail != actual_x_fail) {
+ ret += 2;
+ }
+
+ exit(ret);
+}
+
+int main() {
+ // assumed to start in M mode
+ set_cfg();
+
+ switch_mode(); // if switching to U mode, branch directly to try_access_umode
+
+ try_access();
+
+ checkTestResult();
+ return 0; // expected exit code 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c
new file mode 100644
index 00000000..9e28bda4
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start execution in M mode.
+ * That makes it easier to handle PMP exceptions during the test.
+ *
+ * Remarks:
+ * - RW=01 is not covered; U/M mode sharing is tested separately.
+ * - RLB is always 0; CSR access control is tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, the R and W combinations need to be separated.
+ * Skip RW=01 (share mode) at the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 1
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 0;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 0;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
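+ *
+ * With M_MODE_RWX, pmpaddr0 below gets (TEST_MEM_START >> 3) - 1 = 0x3ffff:
+ * a NAPOT value whose 18 trailing ones encode base 0 with size
+ * 2^(18+3) = 0x200000 bytes, i.e. all memory below TEST_MEM_START.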
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..30c97840 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
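+ *
+ * Layout note: each pmpNcfg field is one byte, so on RV64 pmpcfg0 holds
+ * entries 0-7 and cfg1 is merged below with "cfg0 |= cfg1 << 32", while
+ * on RV32 entries 4-7 live in pmpcfg1 and cfg1 is written separately.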
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
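+ * When switch_to_U() is enabled, it also parks sp at U_MEM_END, inside the
+ * pmp3cfg TOR region, so U-mode stack accesses hit a configured entry.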
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..efa958c1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
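+ *
+ * This variant sets MSECCFG_MMWP (machine-mode whitelist policy): M-mode
+ * accesses that match no PMP entry are denied rather than allowed, so every
+ * region the test touches must be covered by one of the entries below.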
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..d30c9b74 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
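+ *
+ * This variant sets MSECCFG_MML (machine-mode lockdown): an entry without
+ * the L bit becomes a U-mode-only rule, so M-mode accesses to TEST_MEM,
+ * whose entry below is left unlocked, are expected to fault.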
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..1117a163 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
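+ *
+ * detect_pmp_granularity() above relies on WARL behavior (assuming
+ * pmp0cfg.A is OFF): writing all-ones to pmpaddr0 reads back with the low
+ * G bits forced to zero for a grain of 2^(G+2) bytes. E.g. a read-back of
+ * ...fffc (G = 2) gives granule = 1 << (2 + 2) = 16 bytes.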
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..2cf61dc4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
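+ * (a faulting load/store is resumed at epc + 4, which assumes a 4-byte,
+ * non-compressed instruction);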
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_rw_fail != actual_rw_fail) {
+ ret += 1;
+ }
+
+ if (expected_x_fail != actual_x_fail) {
+ ret += 2;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assumes we start in M mode
+ set_cfg();
+
+ switch_mode(); // in case of switching to U mode, branch to try_access_umode directly
+
+ try_access();
+
+ checkTestResult();
+ return 0; // not reached; checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..0d9e737a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ +
+/*
+ * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode.
+ * That makes it easier to deal with PMP exceptions in the test.
+ *
+ * Remarks:
+ * - RW=01 not covered. U/M mode sharing will be tested separately.
+ * - RLB is always 0. CSR access control will be tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, need to separate R and W combinations.
+ * Skip RW=01 (share mode) on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
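+ * The value returned by handle_trap() is assumed to be written back to
+ * mepc by the trap entry code, so "return epc + 4" resumes past the
+ * faulting instruction -- valid only for 4-byte (non-compressed) encodings;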
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
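+ *
+ * Note that mseccfg.MMWP (machine-mode whitelist policy) is set below, so
+ * M-mode accesses that match no PMP entry are denied rather than allowed.
+ * The 256-byte mismatch offset on pmpaddr1 appears intended to leave the
+ * bottom of TEST_MEM matched by no entry, which is why both
+ * expected_rw_fail and expected_x_fail are 1 above.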
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..59b7e007 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
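+ *
+ * Note that mseccfg.MML (machine-mode lockdown) is set below; under MML,
+ * entries with PMP_L become M-mode rules and M-mode may execute only from
+ * such rules, which is why M_MODE_RWX is 0 here and the code/data entries
+ * in cfg1 get PMP_L (see the M_MODE_RWX remark at the top of this file).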
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..74955806 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
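+ *
+ * Worked example of the NAPOT encoding in the M_MODE_RWX branch below:
+ * pmpaddr0 = (TEST_MEM_START >> 3) - 1 = (0x200000 >> 3) - 1 = 0x3ffff,
+ * i.e. 18 trailing one-bits, which encodes a naturally aligned
+ * power-of-two region of size 2^(18+3) = 0x200000 based at 0 -- i.e.
+ * everything below TEST_MEM_START.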
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..f8ba50be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
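+ *
+ * In this variant the TEST_MEM entry (the << 16 term below) is a locked
+ * TOR rule carrying only X while MML is set, so M-mode should fetch from
+ * TEST_MEM successfully but fault on loads/stores -- consistent with
+ * expected_rw_fail = 1 and expected_x_fail = 0 above.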
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..8dbfaa36 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
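+ *
+ * detect_pmp_granularity() above uses the spec's probing recipe: write
+ * all-ones to pmpaddr0 and read it back; if the low G bits read back as
+ * zero, the grain is 2^(G+2) bytes. E.g. a read-back ending in ...fffc
+ * (bits 1:0 forced to zero) makes the loop stop with g = 4, i.e. a
+ * 16-byte grain; a read-back with bit 0 set yields the minimum 4-byte grain.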
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..41cfd81c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
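+ *
+ * The "csrs 0x747" instructions in this function address mseccfg by its
+ * CSR number (0x747, from the Smepmp extension), presumably because older
+ * assemblers do not recognize the mseccfg name.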
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..99202f86 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
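+ *
+ * Setting mseccfg.RLB (rule-locking bypass) at the top of this function
+ * allows entries locked with PMP_L by an earlier configuration to be
+ * rewritten; without RLB, writes to locked pmpcfg/pmpaddr entries are
+ * ignored.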
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
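+ * FAKE_ADDRESS matches no PMP entry, so in U mode the store below raises a
+ * store access fault; handle_trap() recognizes mtval == FAKE_ADDRESS and
+ * finishes the test through checkTestResult() instead of resuming.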
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..ccf25866 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
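+ *
+ * With a non-zero @pmp_addr_offset@ the code below first probes the PMP
+ * grain: after writing all-ones to pmpaddr0, the number of trailing zero
+ * bits in the value read back, plus 2 (pmpaddr omits address bits [1:0]),
+ * gives log2 of the grain. The mismatch offset is then the grain, doubled
+ * until it reaches the requested 256 bytes.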
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
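+ * Either way the run ends in checkTestResult(): the exit code sets bit 0
+ * if the r/w outcome differs from expected_rw_fail and bit 1 if the fetch
+ * outcome differs from expected_x_fail, so exit code 0 means the PMP
+ * behaved exactly as this variant predicts.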
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..88fe4370 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
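+ *
+ * In this variant only MMWP is set and the TEST_MEM entry stays OFF (the
+ * cfg-creation block below is compiled out), so M-mode accesses to
+ * TEST_MEM match no PMP rule. MMWP turns such unmatched M-mode accesses
+ * into access faults, which is why this file expects both the r/w and the
+ * fetch probe to fail.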
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..0e840c52 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
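+ *
+ * Both MML and MMWP are set in this variant. Under MML a rule with L
+ * clear is a U-mode rule, so the non-locked R-only TEST_MEM entry below
+ * does not authorize M-mode accesses, and MMWP denies anything that
+ * matches no rule at all; both probes are therefore expected to fault.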
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..993628df --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
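+ *
+ * This variant leaves mseccfg clear, keeps every rule unlocked, and uses
+ * @pmp_addr_offset@ = 0 so the TEST_MEM entry matches exactly. Without
+ * MML/MMWP an unlocked rule never constrains M mode, so both probes are
+ * expected to succeed.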
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..cbc89f2d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..5d9df03d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
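+ *
+ * MMWP only denies M-mode accesses that match no PMP rule. Here the
+ * TEST_MEM entry is programmed (exact-match TOR) and left unlocked, and
+ * MML stays clear, so the M-mode probes still hit a matching rule and are
+ * expected to succeed.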
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..bfa8b06a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
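+ *
+ * The pmpaddr CSRs hold physical address bits [XLEN+1:2], hence the >> 2 on
+ * every write below. A TOR entry i matches
+ *   pmpaddr[i-1] << 2 <= addr < pmpaddr[i] << 2,
+ * so pmpaddr1/pmpaddr2 bound TEST_MEM and pmpaddr3 extends U_MEM above it.
+ * A minimal membership check (an illustrative helper, not part of the
+ * generated code):
+ *
+ *   static int in_tor(reg_t lo, reg_t hi, reg_t addr) {
+ *       return ((lo << 2) <= addr) && (addr < (hi << 2));
+ *   }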
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..2087b6a1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
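+ * The expected_* constants above are baked in by the generator for this
+ * permutation, while the trap handler and target_foo() record what actually
+ * happened in the actual_* flags. checkTestResult() encodes any mismatch in
+ * the exit code (bit 0 for the RW check, bit 1 for the fetch check), so only
+ * exit(0) means the PMP behaved as predicted.
+ *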
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
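+ *
+ * In the M_MODE_RWX configuration, pmpaddr0 = (TEST_MEM_START >> 3) - 1 is
+ * the NAPOT encoding of [0, TEST_MEM_START): with TEST_MEM_START = 0x200000
+ * it yields 0x3ffff, i.e. base 0 with 18 trailing one bits, encoding a
+ * 2^21-byte region. The general form, as an illustrative macro (not emitted
+ * by the generator):
+ *
+ *   #define NAPOT_ADDR(base, size) (((base) | ((size)/2 - 1)) >> 2)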
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..93b311c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
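+ *
+ * detect_pmp_granularity() above uses the standard WARL probe: write
+ * all-ones to pmpaddr0 and read it back; the implementation clears the low
+ * bits it cannot represent, and G trailing zero bits mean a granularity of
+ * 2^(G+2) bytes (which is why the loop starts counting at g = 2). An
+ * equivalent one-liner, assuming a nonzero readback and GCC/Clang builtins:
+ *
+ *   granule = 1UL << (__builtin_ctzl(temp_reg) + 2);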
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..01795ad6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
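+ *
+ * Packing note for the code below: the cfg byte for PMP entry n occupies
+ * bits [8n+7:8n] of the packed pmpcfg value. On RV64 only even-numbered
+ * pmpcfg CSRs exist and pmpcfg0 covers entries 0-7, which is why entries
+ * 4-6 are merged in with cfg0 |= (cfg1 << 32); on RV32 the same bytes are
+ * written to pmpcfg1 instead.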
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..59627866 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
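+ *
+ * CSR 0x747 below is mseccfg from the Smepmp extension. RLB (bit 2) is set
+ * first because it permits rewriting locked (PMP_L) entries; MML (bit 0)
+ * and MMWP (bit 1) are sticky, so the csrs instructions can set them but
+ * they cannot be cleared again without a reset.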
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..737a582a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
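+ *
+ * The "(1 ? PMP_R : 0)"-style ternaries below are per-permutation toggles
+ * instantiated by the generator. The group shifted by 16 fills the pmp2cfg
+ * byte, i.e. the TOR entry guarding TEST_MEM, while the shift by 24 fills
+ * pmp3cfg for U_MEM.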
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..f4e02fcd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_rw_fail != actual_rw_fail) {
+ ret += 1;
+ }
+
+ if (expected_x_fail != actual_x_fail) {
+ ret += 2;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // running in M mode here
+ set_cfg();
+
+ switch_mode(); // if switched to U mode, branch to try_access_umode directly
+
+ try_access();
+
+ checkTestResult();
+ return 0; // not reached; checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c
new file mode 100644
index 00000000..b6cebfc0
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start execution in M mode;
+ * that makes it easier to handle the PMP exceptions the test provokes.
+ *
+ * Remarks:
+ * - RW=01 not covered. U/M mode sharing will be tested separately
+ * - RLB is always 0. CSR access control will be tested separately
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, need to separate R and W combinations.
+ * Skip RW=01 (share mode) at the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
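+ *
+ * handle_trap() below replaces the default handler: data access faults
+ * inside TEST_MEM are recorded in actual_rw_fail and execution resumes at
+ * epc + 4, while fetch faults in TEST_MEM and the deliberate FAKE_ADDRESS
+ * store end the run via checkTestResult().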
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
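+ *
+ * In this variant a locked, read-only TOR entry (R=1, W=0, X=0, L=1)
+ * covers TEST_MEM and mseccfg.MMWP is set. Because L makes the rule
+ * apply to M mode as well, the store and the instruction fetch into
+ * TEST_MEM are both expected to fault, matching
+ * expected_rw_fail = expected_x_fail = 1 above.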
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
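+ *
+ * Note that switch_mode() is compiled out (#if 0) in this variant, so
+ * this U-mode path is never actually entered; it is retained, presumably
+ * so the generated sources stay structurally identical across variants.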
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..a4a23d96 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
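+ *
+ * detect_pmp_granularity() above follows the spec recipe: write all-ones
+ * to pmpaddr0, read it back, and the index k of the lowest bit that
+ * reads as 1 gives a granularity of 2^(k+2) bytes (k = 0 means the
+ * minimum 4-byte granule).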
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..185506cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
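+ *
+ * This is the cfg-mismatch case: the "#if 0" below leaves TEST_MEM with
+ * no pmpcfg entry at all. With MML and MMWP both clear, M-mode accesses
+ * that match no PMP entry are allowed by default, which is why
+ * expected_rw_fail = expected_x_fail = 0 above.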
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..e7780167 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
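+ *
+ * Here TEST_MEM again gets no cfg entry ("#if 0" below), but
+ * mseccfg.MML is set. Under the Smepmp MML rules, M-mode data accesses
+ * that match no entry still succeed while instruction fetch from a
+ * non-matching region is denied, matching expected_rw_fail = 0 and
+ * expected_x_fail = 1 above.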
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..291c05ab --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
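+ *
+ * This is the address-mismatch case: mismatch_offset (256, rounded up to
+ * the detected granularity) raises the TOR base written to pmpaddr1, so
+ * the test code/data at the bottom of TEST_MEM presumably fall between
+ * the M_MODE_RWX NAPOT region [0, TEST_MEM_START) and the shifted
+ * TEST_MEM entry. With mseccfg.MMWP set, those non-matching M-mode
+ * accesses fault, matching expected_rw_fail = expected_x_fail = 1 above.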
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..9c53d0fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
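+ *
+ * 0x747 in the csrs instructions here is mseccfg's CSR number, written
+ * numerically presumably because older assemblers do not know the name.
+ * The MSECCFG_RLB (Rule Locking Bypass) bit set at the top of set_cfg()
+ * allows locked entries to be reprogrammed while the test sets up.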
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..b64debb9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
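+ *
+ * In this variant the TOR entry over TEST_MEM matches (offset 0) and
+ * grants R and X, but it is not locked (L=0) and no mseccfg bits are
+ * set. Unlocked PMP entries do not constrain M-mode accesses at all, so
+ * both accesses succeed, matching expected_rw_fail = expected_x_fail = 0
+ * above.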
+ */
+
+ unsigned int mismatch_offset = 0;
+
+ if (mismatch_offset != 0x0){
+ volatile int pmp_granularity = detect_pmp_granularity();
+ mismatch_offset = mismatch_addr_offset(pmp_granularity);
+ }
+
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ // Only true for Spike
+// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+// exit(cfg0);
+// }
+
+ if (0) { // the L bit must be set for M-mode code such as the trap handler
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM
+#if 1
+ cfg0 |= ( (1 ? PMP_R : 0)
+ | (0 ? PMP_W : 0)
+ | (1 ? PMP_X : 0)
+ | PMP_TOR | (0 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // set proc->state.mseccfg, for MML/MMWP
+ const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0);
+ if (seccfg_bits) {
+ asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+ }
+
+ // currently a no-op, since Spike flushes its TLB whenever mseccfg is written
+ asm volatile ("fence.i \n");
+}
+
+// from the pmp_ok() perspective, W/R/X behave alike
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_rw_fail = 1;
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // reset inside target_foo()
+ target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used from U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * Switch back to M mode by provoking a write access fault on a special address.
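+ * FAKE_ADDRESS lies outside every region programmed in set_cfg(), and a
+ * U-mode access that matches no PMP entry is denied whenever at least
+ * one PMP entry is implemented, so the store below is guaranteed to
+ * trap into handle_trap(), which recognises mtval == FAKE_ADDRESS and
+ * finishes the test.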
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_rw_fail != actual_rw_fail) {
+ ret += 1;
+ }
+
+ if (expected_x_fail != actual_x_fail) {
+ ret += 2;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // starts in M mode
+ set_cfg();
+
+ switch_mode(); // if we switched to U mode, this branches straight into try_access_umode
+
+ try_access();
+
+ checkTestResult();
+ return 0; // expected exit code 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c
new file mode 100644
index 00000000..6fa48efe
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode; that makes
+ * it easier to handle the PMP exceptions the test provokes.
+ *
+ * Remarks:
+ * - RW=01 is not covered; U/M mode sharing is tested separately
+ * - RLB is always 0; CSR access control is tested separately
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, the R and W combinations need to be separated.
+ * RW=01 (share mode) is skipped on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise unexpected exceptions are raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * Overrides the trap handler from syscalls.c.
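+ * How the two result flags are derived (inferred from the handler
+ * below): an instruction access fault inside [TEST_MEM_START,
+ * TEST_MEM_END) leaves epc in that range and counts as a fetch (X)
+ * failure; a load/store fault whose mtval falls in the same range
+ * counts as an R/W failure, and the faulting instruction is skipped.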
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
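+ *
+ * TOR matching rule relied on below (standard PMP): an entry with A=TOR
+ * matches addresses a with pmpaddr[i-1]*4 <= a < pmpaddr[i]*4. Entry 2
+ * therefore covers [TEST_MEM_START + mismatch_offset, TEST_MEM_END),
+ * with entry 1 (A=OFF) supplying only its floor, and entry 3 covers
+ * [TEST_MEM_END, U_MEM_END).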
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
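+ * Whichever path runs, the test finishes in checkTestResult(), whose
+ * exit code packs both comparisons: bit 0 set means the r/w outcome
+ * differed from expected_rw_fail, bit 1 set means the fetch outcome
+ * differed from expected_x_fail; 0 means the test passed.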
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..daa81820 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
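+ * A review note on switch_to_U() below: its asm loads MSTATUS_MPP into
+ * %0 but then executes "csrc mstatus, t0", clearing whatever t0 happens
+ * to hold, so MPP is only cleared (dropping to U mode on mret) if the
+ * compiler allocates %0 to t0. The sequence is compiled out in this u0
+ * variant (switch_mode() wraps the call in #if 0).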
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
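+ *
+ * The magic CSR number 0x747 used in set_cfg() is mseccfg (Smepmp);
+ * csrs only sets bits. This mmwp1 variant sets MMWP, under which an
+ * M-mode access that matches no PMP entry is denied rather than
+ * allowed; MML is left clear here.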
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..37f20ca9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
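+ *
+ * This mmwp1/mml1 variant sets both mseccfg bits: with MML (Machine
+ * Mode Lockdown) non-locked entries no longer grant M mode any access,
+ * and with MMWP an M-mode access matching no entry is denied. Since the
+ * l0 TEST_MEM entry is not locked, both the r/w and the fetch probes
+ * are expected to fail (expected_rw_fail = expected_x_fail = 1).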
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..ed8193bd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
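+ *
+ * This match0 variant raises pmpaddr1 by mismatch_offset so the probes
+ * into TEST_MEM fall below entry 2's TOR floor and match nothing; with
+ * MMWP clear, an unmatched M-mode access is allowed, hence both
+ * expected_*_fail are 0. It also takes the M_MODE_RWX branch, using one
+ * R|W|X NAPOT entry for [0, TEST_MEM_START) instead of the usual
+ * code/data TOR pair. The offset is first aligned up to the PMP grain,
+ * detected as in the spec: write all-ones to a pmpaddr, count the G
+ * trailing zero bits that read back, and the grain is 2^(G+2) bytes
+ * (see detect_pmp_granularity() above).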
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..3de88ead --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
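+ *
+ * Packing note (standard pmpcfg layout): each pmpNcfg is one byte, with
+ * entry N at bits [8N+7:8N] of the CSR. On RV64, entries 0-7 all live
+ * in pmpcfg0 and odd-numbered pmpcfg registers do not exist, hence
+ * "cfg0 |= cfg1 << 32" below; on RV32, entries 4-7 are written to
+ * pmpcfg1 with a separate csrw.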
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..9654129a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
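+ *
+ * The constant ternaries below, e.g. (1 ? PMP_R : 0) | (0 ? PMP_W : 0),
+ * together with the #if 0/#if 1 guards, are knobs substituted by the
+ * generator and folded by the compiler; they spell this variant's
+ * parameters out of the file name (rw10 -> R=1 W=0, x1 -> X=1,
+ * l1 -> locked entry).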
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..9810c431 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently it simply skips to the instruction after a faulting access.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+ asm volatile ("nop");
+ actual_x_fail = 1;
+ checkTestResult();
+ } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+ reg_t addr;
+ asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+ if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+ actual_rw_fail = 1;
+ return epc + 4;
+ }
+
+ if (addr == FAKE_ADDRESS) {
+ asm volatile ("nop");
+ asm volatile ("nop");
+ checkTestResult();
+ }
+ }
+
+ printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+ tohost_exit(1337);
+}
+
+
+// switch (via mret) to U mode and resume at the next PC
+static void switch_to_U() {
+ reg_t tmp;
+ asm volatile (
+ "li %0, %1\n"
+ "\tcsrc mstatus, t0\n" // note: clears via t0, apparently assuming %0 is allocated t0
+ "\tla %0, try_access_umode \n"
+ "\tcsrw mepc, %0\n"
+ "\tli sp, %2\n"
+ "\tmret\n"
+ : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if 0
+ switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+ asm volatile ("nop");
+
+ actual_x_fail = 0;
+}
+
+/*
+ * avoid touching actual_x_fail, which lives in M-mode memory
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+static int detect_pmp_granularity(){
+ unsigned int granule;
+ unsigned long int temp_reg;
+ unsigned long int all_ones = ~0x0UL;
+
+ // probe: write all ones to pmpaddr0 and read back; bits that stay zero reveal the granule
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+ asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+ int g = 2;
+ for(uintptr_t i = 1; i; i<<=1) {
+ if((temp_reg & i) != 0)
+ break;
+ g++;
+ }
+ granule = 1UL << g;
+
+ return granule;
+}
+
+static int mismatch_addr_offset(int granule_size){
+ unsigned int addr_offset = 256;
+
+ if (addr_offset == 0x0){
+ return 0x0;
+ }
+ else {
+ // double from one granule until the offset reaches addr_offset
+ unsigned int mismatch_offset = granule_size;
+ while (mismatch_offset < addr_offset){
+ mismatch_offset = mismatch_offset << 0x1;
+ }
+ return mismatch_offset;
+ }
+}
+
+/*
+ * On processor_t::reset(), Spike does:
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+ /*
+ * set MSECCFG_RLB so that locked entries can still be reconfigured
+ */
+ unsigned rlb_value = MSECCFG_RLB;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+ /*
+ * Set pmp0cfg for M mode (M_MEM) and pmp1cfg as the base of a TOR region,
+ * then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+ * pmp3cfg covers the fixed U-mode region (U_MEM).
+ *
+ * Here @pmp_addr_offset:int@ creates an address mismatch,
+ * and @create_pmp_cfg:int@ creates a cfg mismatch.
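+ *
+ * Reminder on the encodings used below (RISC-V priv. spec): a pmpaddr CSR
+ * holds physical-address bits [XLEN+1:2], hence the >> 2 shifts
+ * (e.g. TEST_MEM_END = 0x240000 is written as 0x240000 >> 2 = 0x90000),
+ * and a TOR entry i matches pmpaddr[i-1]*4 <= addr < pmpaddr[i]*4.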
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
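+ * After that trap, handle_trap() ends up in checkTestResult(), which encodes
+ * any expected/actual mismatch bitwise in the exit code: bit 0 for the
+ * R/W check, bit 1 for the execute check; 0 means the test passed.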
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..97ca1888 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
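+ * (handle_trap() below advances mepc by 4 past a faulting access, so the
+ * test is presumably built without compressed instructions);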
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
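+ *
+ * detect_pmp_granularity() above uses the probe from the priv. spec: write
+ * all-ones to a pmpaddr while its cfg is OFF, read it back, and count the
+ * trailing zero bits k; the granularity is 2^(k+2) bytes (an all-ones
+ * readback means k = 0, i.e. 4-byte granules).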
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
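+ * (For the R/W check in try_access() above: the eight initial bytes
+ * {1,...,8} of target_arr read back little-endian as 0x0807060504030201,
+ * and the preceding target_arr[0] += 1 accounts for the extra + 1 in the
+ * comparison.)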
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..26568415 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
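+ *
+ * In the M_MODE_RWX variant below, pmpaddr0 = (TEST_MEM_START >> 3) - 1 =
+ * 0x3ffff is a NAPOT encoding: 18 trailing one-bits select a
+ * 2^(18+3) = 0x200000-byte region based at 0, i.e. exactly the memory
+ * below TEST_MEM_START.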
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
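+ * (In set_cfg() above, cfg1's byte lanes follow the pmpcfg layout: byte 0 is
+ * pmp4cfg (OFF, it only anchors the TOR base), byte 1 is pmp5cfg (X|TOR, the
+ * 'for code' window) and byte 2 is pmp6cfg (R|W|TOR, the 'for data' window);
+ * on RV64 the cfg1 << 32 merge lands them in pmpcfg0's pmp4..pmp6 slots.)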
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..8c7c21e3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
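+ *
+ * 0x747 in the csrs instructions of set_cfg() is the CSR number of mseccfg
+ * (mseccfgh is 0x757 on RV32); it is written by number since assemblers
+ * without Smepmp support may not know the name.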
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
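+ * (The if (0)/if (1) and #if 0/#if 1 toggles in set_cfg() above are baked in
+ * by gen_pmp_test.cc from the parameters encoded in this file's name:
+ * u/rw/x/l select the cfg bits, match selects the address/cfg mismatch, and
+ * mmwp/mml select the mseccfg bits.)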
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..8c393805 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
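+ *
+ * This is a match1 variant: mismatch_offset below stays 0, so the pmp2 TOR
+ * window starts exactly at TEST_MEM_START; match0 variants instead create a
+ * mismatch through a non-zero offset or by compiling the cfg entry out.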
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
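+ * (In try_access() above, actual_x_fail is pre-set to 1 and only cleared by
+ * target_foo() itself; if the fetch faults instead, handle_trap() sees epc
+ * inside TEST_MEM, keeps the flag set, and finishes the test right there.)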
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..482f0ba1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
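+ *
+ * The (1 ? PMP_R : 0)-style ternaries below materialize this file's
+ * rw11/x0/l0 parameters as compile-time constants; in this match0 variant
+ * the whole block sits under #if 0, the generator's cfg-mismatch case.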
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..be2e6553 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
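+ *
+ * All the u0 variants of this test leave switch_mode() empty (#if 0 above),
+ * so try_access() runs entirely in M mode; switch_to_U() is presumably only
+ * exercised by the u1 variants. Note its asm loads the MPP mask into %0 but
+ * clears mstatus via t0, which seems to rely on the template's register choice.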
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+     */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_rw_fail != actual_rw_fail) {
+        ret += 1;
+    }
+
+    if (expected_x_fail != actual_x_fail) {
+        ret += 2;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // assumed to be entered in M mode
+    set_cfg();
+
+    switch_mode();  // if switched to U mode, this branches to try_access_umode directly
+
+    try_access();
+
+    checkTestResult();
+    return 0;   // not reached; checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c
new file mode 100644
index 00000000..00225439
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode,
+ * which makes it easier to handle PMP exceptions during the test.
+ *
+ * Remarks:
+ * - RW=01 not covered. U/M mode sharing will be tested separately
+ * - RLB is always 0. CSR access control will be tested separately
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, the R and W combinations need to be tested separately.
+ * Skip RW=01 (share mode) on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
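+ * handle_trap() returns the mepc to resume at; skipping with epc + 4
+ * assumes the faulting instruction is a 4-byte (uncompressed) one,
+ * i.e. a build without the C extension.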
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
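+ *
+ * For reference (a sketch of the NAPOT encoding from the privileged
+ * spec, not code in this repo): under M_MODE_RWX, pmp0 uses A=NAPOT,
+ * where pmpaddr = (base >> 2) | ((size >> 3) - 1). For the value
+ * written below, assuming base 0:
+ *
+ *   (TEST_MEM_START >> 3) - 1 = 0x3ffff   // 18 trailing ones
+ *   => size = 1 << (18 + 3) = 0x200000    // covers 0x0..0x1fffff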
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
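+ *
+ * In this variant MSECCFG_MMWP is set, so even M-mode accesses must
+ * match some PMP entry; FAKE_ADDRESS (0x10000000) matches none, which
+ * is what makes the store fault and return control to handle_trap().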
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..03c19fad --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
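+ * For unexpected traps the handler prints the cause and epc (when
+ * PRINTF_SUPPORTED) and reports failure code 1337 via tohost_exit(),
+ * i.e. over the HTIF tohost channel of the host test environment.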
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
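+ *
+ * detect_pmp_granularity() (defined above) relies on pmpaddr0 being a
+ * WARL field: after writing all-ones, low bits that read back as zero
+ * are hardwired, and the position of the first set bit gives the
+ * grain. A sketch, assuming a 4 KiB granularity:
+ *
+ *   csrw pmpaddr0, ~0UL    // suppose it reads back as 0x..fffffc00
+ *   10 trailing zero bits  // g = 2 + 10 = 12
+ *   granule = 1 << 12      // 4096 bytes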
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..dd1705be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
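+ *
+ * The literal 0x747 in the csrs lines below is the CSR address of
+ * mseccfg (from the Smepmp extension); the raw number is used here on
+ * the assumption that the assembler may not know the symbolic name.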
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..5be1d78b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
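+ *
+ * Each pmpNcfg below is one byte of a packed pmpcfg CSR, hence the
+ * byte shifts. A sketch of the layout being built (RV64; on RV32 the
+ * upper four bytes go to pmpcfg1 instead):
+ *
+ *   cfg0 = pmp0cfg | (pmp1cfg << 8) | (pmp2cfg << 16) | (pmp3cfg << 24)
+ *        | (cfg1 << 32)   // pmp4cfg..pmp7cfg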
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..b41e9f75 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
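+ *
+ * The constant if (0)/if (1) guards and (1 ? X : 0) terms below are
+ * knobs substituted by the generator (gen_pmp_test.cc); each output
+ * file freezes one combination, so the dead branches are intentional.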
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..3d0ebdd6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
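+ *
+ * With MSECCFG_MML set (as in this variant), rules without PMP_L apply
+ * to U mode only and M mode loses its default access to unmatched
+ * regions; that is why M_MODE_RWX must be 0 and why the code regions
+ * used by the trap handler get PMP_L below.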
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..24751757 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
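+ *
+ * In this variant the TEST_MEM entry is left OFF (the #if 0 block
+ * below) and neither MML nor MMWP is set, so M-mode accesses that
+ * match no PMP entry are allowed by default; expected_rw_fail and
+ * expected_x_fail are therefore 0.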
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
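+ *
+ * (FAKE_ADDRESS is not covered by any PMP entry, so the store below
+ * always raises a store access fault. handle_trap() recognizes
+ * mtval == FAKE_ADDRESS and finishes via checkTestResult(), so the
+ * faulting store effectively acts as a U-mode "return to M" call.)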
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..85c6b2cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
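+ * (checkTestResult() above encodes the verdict in the exit code:
+ * bit 0 means the load/store outcome differed from expected_rw_fail,
+ * bit 1 means the fetch outcome differed from expected_x_fail, so 0 is
+ * a pass and 3 means both probes went wrong.)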
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
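+ *
+ * detect_pmp_granularity() above uses the standard probe: write all
+ * ones to pmpaddr0 and read back; bits hardwired to zero at the bottom
+ * give the grain. With T trailing zero bits the granularity is
+ * 2^(T+2) bytes, e.g. (a sketch of possible readbacks):
+ *
+ *   readback ...ffffffff -> T = 0 -> 4-byte granularity (NA4 legal)
+ *   readback ...ffffff00 -> T = 8 -> 1 KiB granularity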
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
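+ *
+ * (This variant also sets mseccfg.MML. Under the Smepmp rules, M mode
+ * may then execute only from regions with a matching locked rule that
+ * grants execute; the fetch from TEST_MEM matches no entry here, so
+ * expected_x_fail is 1, while M-mode loads/stores to unmatched regions
+ * still succeed as long as MMWP stays clear, so expected_rw_fail is 0.)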
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..80bd27fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
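+ * (This variant sets mseccfg.MMWP, which removes the default M-mode
+ * permission for addresses that match no PMP entry. pmpaddr1 below is
+ * raised by mismatch_offset (256 here), so TEST_MEM accesses match
+ * nothing and both expected_rw_fail and expected_x_fail are 1.)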
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
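+ *
+ * mismatch_addr_offset() above rounds the requested 256-byte mismatch
+ * up to something the PMP grain can express, doubling from the
+ * detected granularity: a grain of 4 gives 4, 8, ..., 256, while a
+ * 4 KiB grain already exceeds 256 and is returned as-is.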
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..554530a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
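+ *
+ * Note on the cfg packing below: each pmpcfgN register holds one
+ * configuration byte per PMP entry. On RV64 only even-numbered pmpcfg
+ * CSRs exist and pmpcfg0 covers entries 0-7, hence cfg0 |= cfg1 << 32;
+ * on RV32 pmpcfg0 covers entries 0-3 and cfg1 is written to the
+ * separate pmpcfg1 CSR instead.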
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..4e6698fd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
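+ *
+ * A caveat in switch_to_U() above: the asm loads MSTATUS_MPP into the
+ * compiler-chosen register %0 but clears mstatus through t0, so it
+ * appears to rely on %0 being allocated to t0. A sketch that drops
+ * that assumption would use the operand consistently, e.g.
+ *
+ *   "li %0, %1\n\tcsrc mstatus, %0\n"   // MPP = 00, so mret enters U mode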
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..6b1d1bfc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
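+ *
+ * For reference, the M_MODE_RWX branch below (compiled out in this
+ * variant) encodes a NAPOT rule as pmpaddr = (base >> 2) | (size/8 - 1).
+ * With base 0 and size TEST_MEM_START = 0x200000 that gives
+ * (0x200000 >> 3) - 1 = 0x3ffff, a single RWX rule covering all of
+ * [0, 0x200000).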
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..851663dd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
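+ *
+ * The raw CSR number 0x747 used throughout set_cfg() is mseccfg, the
+ * Smepmp configuration register (the numeric form avoids depending on
+ * assembler support for the name). RLB (Rule Locking Bypass) is set
+ * first so that entries carrying PMP_L can still be rewritten while
+ * the test programs them.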
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..7494a8da --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
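+ * (Key to the generated file names, inferred from the variants in this
+ * directory: u<n> selects whether the test drops to U mode (u0 keeps
+ * switch_mode() a no-op), rw<nn>/x<n>/l<n> are the R/W, X and L bits
+ * given to the TEST_MEM rule, match<n> says whether that rule's range
+ * actually covers TEST_MEM, and mmwp<n>/mml<n> are the mseccfg bits
+ * set in set_cfg().)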
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
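+ *
+ * pmpaddrN holds a physical address right-shifted by 2, so the writes
+ * below work out to (constants from this file, computed by hand):
+ *
+ *   pmpaddr3 = U_MEM_END >> 2      = 0x250000 >> 2 = 0x94000
+ *   pmpaddr2 = TEST_MEM_END >> 2   = 0x240000 >> 2 = 0x90000
+ *   pmpaddr1 = TEST_MEM_START >> 2 = 0x200000 >> 2 = 0x80000
+ *
+ * (mismatch_offset is 0 in this variant, so pmpaddr1 is exact).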
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
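+ *
+ * After the fault brings us back, checkTestResult() folds both checks
+ * into one exit code: bit 0 flags an R/W outcome that differed from
+ * expected_rw_fail, bit 1 a fetch outcome that differed from
+ * expected_x_fail; exit(0) is the only pass. In effect:
+ *
+ *   ret = (rw_mismatch ? 1 : 0) | (x_mismatch ? 2 : 0);  exit(ret);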
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..ddd39b55 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
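+ * Reading the generated file name as parameters (an inference from the
+ * constants each variant hardcodes, not documented by the generator):
+ * u0 = stay in M mode, rw11 = R and W granted, x1 = X granted,
+ * l0 = L bit clear, match0 = a deliberate 256-byte address mismatch is
+ * inserted, and mmwp0/mml0 = the corresponding mseccfg bits stay clear.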
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
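+ *
+ * detect_pmp_granularity() above is the probe from the privileged
+ * spec: write all ones to pmpaddr0, read it back, and count trailing
+ * zero bits; with tz trailing zeros the granularity is 2^(tz+2)
+ * bytes. Worked examples:
+ *
+ *   readback ...11111111   (tz = 0)  ->  G = 4 bytes (the minimum)
+ *   readback low 10 bits 0 (tz = 10) ->  G = 2^12 = 4 KiB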
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
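+ *
+ * On the TOR entries set_cfg() programmed above: a TOR rule i matches
+ * addresses in [pmpaddr(i-1) << 2, pmpaddr(i) << 2), with pmp1 left
+ * OFF purely to supply the base. For this variant's values pmp2 covers
+ *
+ *   [TEST_MEM_START + 256, TEST_MEM_END) = [0x200100, 0x240000)
+ *
+ * so the first 256 bytes of the test window deliberately fall outside
+ * the rule (the "match0" mismatch).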
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..2930f6fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
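+ * mseccfg is CSR 0x747, written numerically below since older
+ * assemblers lack the name. Per the Smepmp extension: RLB (bit 2)
+ * allows locked entries to be edited, MMWP (bit 1) makes M-mode
+ * accesses that match no PMP entry fail instead of defaulting to
+ * allowed, and MML (bit 0) re-purposes the L bit so locked R/W/X
+ * entries become M-mode-only rules.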
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
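+ *
+ * pmpcfg registers pack one configuration byte per entry. On RV64
+ * pmpcfg0 alone holds pmp0cfg..pmp7cfg, which is why the cfg1 value
+ * built below (pmp4cfg in its low byte, pmp5cfg at << 8, pmp6cfg at
+ * << 16) is merged as cfg0 |= cfg1 << 32; on RV32 pmpcfg0 only holds
+ * pmp0cfg..pmp3cfg, so cfg1 goes to the separate pmpcfg1 CSR instead.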
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
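+ *
+ * The (1 ? PMP_R : 0)-style ternaries in set_cfg() are presumably
+ * where gen_pmp_test.cc splices this variant's r/w/x/l parameters in
+ * as literal 0/1 constants for the compiler to fold. Note that in
+ * this variant the whole pmp2cfg block sits under #if 0, so TEST_MEM
+ * ends up with no matching rule at all, which with MML set is what
+ * makes the fetch fail (expected_x_fail = 1).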
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..c7fff758 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
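+ * Each variant hardcodes its oracle up front; here both
+ * expected_rw_fail and expected_x_fail are 1, consistent with MMWP
+ * being set while TEST_MEM has no matching rule (the pmp2cfg block is
+ * compiled out below), since MMWP turns unmatched M-mode accesses
+ * into faults. This reading is inferred from the constants, not
+ * stated by the generator.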
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
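+ *
+ * A quirk in switch_to_U() above: "li %0, %1" loads MSTATUS_MPP into
+ * the scratch output operand, but the following csrc clears mstatus
+ * using t0, so the two only agree if the compiler happens to pick t0
+ * for %0. It is moot in the u0 variants, where switch_mode() compiles
+ * the call out with #if 0.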
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
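+ *
+ * mismatch_addr_offset() rounds the requested offset up by doubling
+ * the granule, i.e. it returns the smallest granule * 2^k that is
+ * >= addr_offset. For this variant:
+ *
+ *   addr_offset = 256, granule = 4    -> 4, 8, ..., 256  -> 256
+ *   addr_offset = 256, granule = 4096 -> already >= 256  -> 4096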
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..baa00691 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
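+ * handle_trap() below has two interesting exits: a trap whose epc lies
+ * inside TEST_MEM is recorded as a fetch failure and ends the test,
+ * while a load/store fault whose mtval lies inside TEST_MEM is
+ * recorded and skipped so execution can continue past the access.
+ * Anything else falls through to tohost_exit(1337).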
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
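+ *
+ * The fetch check in try_access() above is deliberately inverted:
+ * actual_x_fail is set to 1 before the call, and target_foo() clears
+ * it as its last statement, so a faulting or never-completing fetch
+ * leaves the failure already recorded.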
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..8f8828b5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
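+ *
+ * The #if 1 block at the top of set_cfg() below raises mseccfg.RLB
+ * first, presumably so that rules this program itself locks via PMP_L
+ * can still be adjusted while the configuration is being built up;
+ * csrs only sets bits, so the later MML/MMWP write leaves RLB in
+ * place.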
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..79e4e01d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+     */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_rw_fail != actual_rw_fail) {
+        ret += 1;
+    }
+
+    if (expected_x_fail != actual_x_fail) {
+        ret += 2;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // we start here in M mode
+    set_cfg();
+
+    switch_mode();    // in case we switch to U mode, execution branches to try_access_umode directly
+
+    try_access();
+
+    checkTestResult();
+    return 0;    // not reached: checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c
new file mode 100644
index 00000000..98b14231
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executed from M mode.
+ * That will be easier for us to deal with pmp exception for test.
+ *
+ * Remarks:
+ * - RW=01 not covered. U/M mode share will be tested separately
+ * - RLB is always 0. CSR access control will be tested separately
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, need to separate R and W combinations.
+ * Skip RW=01 (share mode) at generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share single cfg for M mode
+ * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception
+ */
+#define M_MODE_RWX 1
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 0;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 0;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..3aedc68e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..0e65fc1f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..d3e2eb9e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..a17239c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..5f46fe74 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..6120591b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
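+ *
+ * FAKE_ADDRESS is assumed to be backed by neither RAM nor any PMP region, so
+ * this store should raise a store access fault with mtval == FAKE_ADDRESS;
+ * handle_trap() recognizes that value and calls checkTestResult() instead of
+ * resuming, which is how the U-mode leg reports back without an ecall.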
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..ab910a33 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
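+ *
+ * Cause-code reference for the checks below, per the privileged spec:
+ * mcause 0x5 is a load access fault and 0x7 a store/AMO access fault, and in
+ * both cases mtval holds the faulting effective address, which is what lets
+ * the handler tell a TEST_MEM probe apart from the FAKE_ADDRESS exit store.
+ *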
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
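+ *
+ * Packing note: on RV64 only the even-numbered pmpcfg CSRs exist, and
+ * pmpcfg0 holds pmp0cfg..pmp7cfg one byte each, so the shifts below pick an
+ * entry: << 8 is pmp1cfg, << 16 is pmp2cfg, << 24 is pmp3cfg, and the
+ * cfg1 << 32 merge folds what RV32 would put in pmpcfg1 into the upper
+ * half. A sketch of addressing one entry:
+ *
+ *     reg_t cfg = 0;
+ *     cfg |= (reg_t)(PMP_R | PMP_W | PMP_TOR) << (8 * 2);   // pmp2cfg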
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
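+ *
+ * Result convention (see checkTestResult below): exit code 0 means both
+ * observed outcomes matched the expected_* constants, 1 means the read/write
+ * outcome mismatched, 2 means the execute outcome mismatched, and 3 means
+ * both did; an unexpected trap ends in tohost_exit(1337) instead.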
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..ac24185d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
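+ *
+ * The execute check is a set-then-clear pattern: try_access() sets
+ * actual_x_fail = 1 and calls target_foo(), which clears it again; the link
+ * script is assumed to place that code inside TEST_MEM, so if its fetch
+ * faults, epc lands in [TEST_MEM_START, TEST_MEM_END), the branch below
+ * keeps the flag at 1, and the flag ends up recording whether target_foo()
+ * actually ran.
+ *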
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
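+ *
+ * detect_pmp_granularity() above uses the standard probe: write all-ones to
+ * pmpaddr0 and read it back; low address bits that are hard-wired to zero
+ * reveal the grain. With k the index of the lowest set bit of the read-back
+ * value, the grain is 2^(k+2) bytes (hence g starting at 2): a fully
+ * writable register (k = 0) means the minimum 4-byte grain, while k = 10
+ * would mean a 4 KiB grain.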
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..e365813c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
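+ *
+ * TOR entries chain off the previous pmpaddr register regardless of that
+ * entry's own mode, each matching [pmpaddr(i-1) << 2, pmpaddr(i) << 2), so
+ * the writes below set up:
+ *
+ *     pmp2 (TOR): [TEST_MEM_START + mismatch_offset, TEST_MEM_END)  test region
+ *     pmp3 (TOR): [TEST_MEM_END, U_MEM_END)                         U stack/data
+ *
+ * and pmpaddr1 only supplies pmp2's base, needing no rights of its own.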
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..7b7c3253 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
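+ *
+ * The long file-name suffix appears to encode this variant's generator
+ * parameters: u1 = perform the access from U mode, rw00/x0 = the TEST_MEM
+ * entry grants no read/write/execute rights, l0 = lock bit clear, match0 =
+ * the entry is deliberately not set up to match or apply, and mmwp0/mml0
+ * are the mseccfg bits; the expected_* constants above follow from that
+ * combination.
+ *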
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
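+ *
+ * One fragility worth noting in switch_to_U() above: the asm loads
+ * MSTATUS_MPP with "li %0, %1" but clears it with "csrc mstatus, t0", so it
+ * silently relies on the compiler allocating t0 for %0. A more robust
+ * sketch (a hypothetical rewrite, not generator output) names the same
+ * register in both places:
+ *
+ *     asm volatile ("li t0, %0\n\tcsrc mstatus, t0"
+ *                   :: "n"(MSTATUS_MPP) : "t0", "memory");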
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..411316b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
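+ *
+ * Because this variant sets MSECCFG_MML (machine-mode lockdown), the Smepmp
+ * rules change: entries with L=0 grant rights to U mode only, and M-mode
+ * code must run under entries with L=1. That is why the if (1) arm below
+ * locks the code/data entries before mseccfg is written, and why RLB was
+ * set at the top of this function so locked entries stay reprogrammable
+ * during setup.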
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..d4cd1cf5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
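+ *
+ * This match0 variant starts from mismatch_offset = 256; mismatch_addr_offset()
+ * rounds that up if the PMP grain is larger, and the offset is then added to
+ * pmpaddr1, so the first bytes of TEST_MEM match no entry at all. U-mode
+ * accesses are denied whenever no entry matches, and with MSECCFG_MMWP set
+ * the same default-deny applies to M mode, hence both expected_*_fail = 1.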
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..ca14b9d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * Currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+ asm volatile ("nop");
+ actual_x_fail = 1;
+ checkTestResult();
+ } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+ reg_t addr;
+ asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+ if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+ actual_rw_fail = 1;
+ return epc + 4;
+ }
+
+ if (addr == FAKE_ADDRESS) {
+ asm volatile ("nop");
+ asm volatile ("nop");
+ checkTestResult();
+ }
+ }
+
+ printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+ tohost_exit(1337);
+}
+
+// switch (via mret) to U mode and resume at try_access_umode
+static void switch_to_U() {
+ reg_t tmp;
+ asm volatile (
+ "li %0, %1\n"
+ "\tcsrc mstatus, %0\n" // clear mstatus.MPP so that mret drops to U mode
+ "\tla %0, try_access_umode \n"
+ "\tcsrw mepc, %0\n"
+ "\tli sp, %2\n"
+ "\tmret\n"
+ : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if 1
+ switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+ asm volatile ("nop");
+
+ actual_x_fail = 0;
+}
+
+/*
+ * avoid touching actual_x_fail here, since it lies in M-mode memory
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+static int detect_pmp_granularity(){
+ unsigned int granule;
+ unsigned long int temp_reg;
+ unsigned long int all_ones = ~0x0UL;
+
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+ asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+ int g = 2;
+ for(uintptr_t i = 1; i; i<<=1) {
+ if((temp_reg & i) != 0)
+ break;
+ g++;
+ }
+ granule = 1UL << g;
+
+ return granule;
+}
+
+static int mismatch_addr_offset(int granule_size){
+ unsigned int addr_offset = 256;
+
+ if (addr_offset == 0x0){
+ return 0x0;
+ }
+ else {
+ unsigned int mismatch_offset = granule_size;
+ while (mismatch_offset < addr_offset){
+ mismatch_offset = mismatch_offset << 0x1;
+ }
+ return mismatch_offset;
+ }
+}
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+ /*
+ * set MSECCFG_RLB so that locked entries can still be reconfigured
+ */
+ unsigned rlb_value = MSECCFG_RLB;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share that one PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ *
+ * Here @pmp_addr_offset:int@ creates an address mismatch,
+ * and @create_pmp_cfg:int@ creates a cfg mismatch.
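+ *
+ * A rough sketch of the TOR layout programmed below (pmpaddrN holds the
+ * target address shifted right by 2, hence the ">> 2"):
+ *
+ *   pmpaddr1 = (TEST_MEM_START + mismatch_offset) >> 2;  // TOR base
+ *   pmpaddr2 = TEST_MEM_END >> 2;  // entry 2 spans [pmpaddr1, pmpaddr2)
+ *   pmpaddr3 = U_MEM_END >> 2;     // entry 3 spans [pmpaddr2, pmpaddr3)
+ *
+ * In byte terms entry 2 therefore matches TEST_MEM_START +
+ * mismatch_offset .. TEST_MEM_END, which is exactly how the "match0"
+ * variants make the probe accesses miss.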
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
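+ * This exit path is deliberately printf-free: with mseccfg.MML set,
+ * U-mode code cannot call into the M-mode console routines, so the only
+ * reliable way back is a fault that the M-mode handler recognizes.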
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..5bbdb0d9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
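+ * Note the fixed "epc + 4" skip: it assumes the faulting access is a
+ * 4-byte instruction, i.e. that the probe code contains no compressed
+ * encodings.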
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
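+ *
+ * For reference, the M_MODE_RWX build covers M-mode memory with a single
+ * NAPOT entry instead.  Sketch of the NAPOT encoding for a power-of-two
+ * region whose base is aligned to its size:
+ *
+ *   pmpaddr = (base >> 2) | ((size >> 3) - 1);
+ *   // base = 0, size = TEST_MEM_START  =>  (TEST_MEM_START >> 3) - 1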
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
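+ * After the fault, handle_trap() lands in checkTestResult(), whose exit
+ * code is a small bitmask: 0 = pass, +1 = R/W outcome differed from
+ * expectation, +2 = fetch outcome differed.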
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..83107ab8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
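+ * Load/store PMP violations arrive with mcause 5 or 7 and the faulting
+ * data address in mtval; fetch violations are caught by the epc range
+ * check instead, since epc itself then points into the blocked region.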
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
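+ * mseccfg is CSR 0x747; the tests write it by number ("csrs 0x747, ...")
+ * so they also assemble with toolchains that predate the Smepmp CSR
+ * names.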
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
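+ * The handler tells this deliberate fault apart from the TEST_MEM
+ * probes purely by the faulting address reported in mtval.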
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..6009bfa6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
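+ * switch_to_U() below clears mstatus.MPP and points mepc at
+ * try_access_umode, so mret "returns" into U-mode code with sp set to
+ * U_MEM_END.  (Caution: the asm loads the MPP mask into %0 but clears
+ * it via t0, silently relying on %0 being allocated to t0.)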
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
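+ *
+ * When a nonzero offset is requested, mismatch_addr_offset() rounds it
+ * up to the detected PMP granularity by doubling (e.g. granularity 4
+ * with request 256: 4 -> 8 -> ... -> 256); in this match1 variant the
+ * request is 0 and the helper simply returns 0.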
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..3f2e92fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
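+ *
+ * On RV64 pmpcfg0 packs eight 8-bit entry configs (entries 0-7), while
+ * RV32 packs only four per register; that is why cfg1 is merged in with
+ * "cfg0 |= (cfg1 << 32)" on RV64 but written to pmpcfg1 on RV32.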
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..ebfa32bc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
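+ *
+ * In this l1 variant entry 2 is locked: with PMP_L set its permission
+ * bits bind M mode as well, so the rw00/x0 restrictions already apply
+ * before mret drops to U mode.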
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..fcfdb7c3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
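+ *
+ * MSECCFG_RLB (rule-lock bypass) is set at the top of set_cfg() so that
+ * entries carrying PMP_L can still be rewritten while the test
+ * reconfigures them.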
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+   */
+  volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+  *p = 1;
+}
+
+static void checkTestResult() {
+  int ret = 0;
+  if (expected_rw_fail != actual_rw_fail) {
+    ret += 1;
+  }
+
+  if (expected_x_fail != actual_x_fail) {
+    ret += 2;
+  }
+
+  exit(ret);
+}
+
+int main() {
+  // runs in M mode
+  set_cfg();
+
+  switch_mode();  // if we switch to U mode, execution continues in try_access_umode instead
+
+  try_access();
+
+  checkTestResult();
+  return 0;  // not reached; checkTestResult() exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c
new file mode 100644
index 00000000..8971b659
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode;
+ * that makes it easier to handle the PMP exceptions raised by the test.
+ *
+ * Remarks:
+ * - RW=01 is not covered here; U/M-mode sharing is tested separately.
+ * - RLB is always 0; CSR access control is tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, need to separate R and W combinations.
+ * Skip RW=01 (share mode) at generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
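+ * (A porting note, not from the generator: handle_trap() returns the PC
+ * to resume at, and the epc + 4 it uses assumes the faulting access is a
+ * 4-byte instruction -- valid for these tests when built without the C
+ * extension, but it would need adjustment for RVC code.)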
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
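+ *
+ * (How the U-mode switch in switch_to_U() above works: MSTATUS_MPP =
+ * 0x1800 is the two-bit previous-privilege field in mstatus.  Clearing
+ * it leaves MPP = 0 = U, so mret drops to U mode at mepc:
+ *
+ *     csrc mstatus, <reg holding MSTATUS_MPP>   // MPP <- U
+ *     csrw mepc, try_access_umode
+ *     mret
+ * )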
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
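+ * (Layout reminder for set_cfg() above, assuming RV64: pmpcfg0 packs
+ * pmp0cfg..pmp7cfg one byte per entry, so the shifts select entries:
+ *
+ *     cfg0 |= (PMP_R|PMP_W|PMP_X|PMP_TOR) << 24;   // byte 3 -> pmp3cfg
+ *     cfg0 |= cfg1 << 32;                          // bytes 4..6 -> pmp4cfg..pmp6cfg
+ * )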
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..b201b349 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
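+ *
+ * (Why detect_pmp_granularity() above works: after writing all-ones,
+ * pmpaddr0 reads back with its low G bits hard-wired to zero when the
+ * PMP grain is 2^(G+2) bytes.  The loop counts those zero bits starting
+ * from g = 2, e.g. a read-back ending in ...111000 gives g = 5 and a
+ * granularity of 32 bytes.)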
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
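+ * (mseccfg bits used via "csrs 0x747" in set_cfg() above: MML restricts
+ * non-locked entries to U mode and locked ones to M mode, MMWP denies
+ * M-mode accesses that match no entry, and RLB allows locked entries to
+ * be edited.  With MML=1, as in this variant, the M-mode code/data
+ * entries must carry the L bit or the trap handler itself would fault.)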
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..8ce88f1e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
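+ * (Result protocol, summarized: checkTestResult(), declared above, exits
+ * with bit 0 set when the observed R/W outcome differs from
+ * expected_rw_fail and bit 1 set when the fetch outcome differs from
+ * expected_x_fail, so exit code 0 means this variant behaved exactly as
+ * the generator predicted.)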
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
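+ * (About the value check in try_access() above: target_arr starts as
+ * bytes 1..8, which a little-endian 64-bit load reads back as
+ * 0x0807060504030201; the byte increment adds 1 and the wide add
+ * contributes delta, hence the expected sum.  If the stores faulted and
+ * were skipped by the handler, the sum check fails too, so
+ * actual_rw_fail is set on either path.)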
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..03c823b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
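+ *
+ * (Background for the mtval read in handle_trap() above: on load/store
+ * access faults, mtval holds the faulting effective address, which is
+ * what lets the handler tell a blocked TEST_MEM probe apart from the
+ * deliberate FAKE_ADDRESS store that ends the test.)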
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..09cab838 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
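+ *
+ * (NAPOT encoding used by the M_MODE_RWX branch below, which this file
+ * takes since it defines M_MODE_RWX as 1: a naturally aligned
+ * power-of-two region [base, base+size) is encoded as
+ *
+ *     pmpaddr = (base >> 2) | (size/8 - 1)
+ *
+ * so (TEST_MEM_START >> 3) - 1 with base 0 describes exactly
+ * [0, TEST_MEM_START), the M-mode memory below the test window.)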
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..8277548a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..d325a8c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
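+ * (Naming convention, as far as it can be inferred from the generated
+ * parameters: u1 = the probe runs from U mode, rw00/x1 = the R/W/X cfg
+ * bits under test, l0 = the L bit, match0 = whether the PMP range is
+ * made to match, and the mmwp0/mml0 suffixes give the mseccfg bits set
+ * in set_cfg().)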
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
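+ *
+ * (In this variant the mismatch appears to come from the cfg side rather
+ * than the address side: the address offset stays 0, and the pmp2cfg
+ * block for TEST_MEM below is compiled out with #if 0, so TEST_MEM
+ * accesses find no matching entry at all.)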
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
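+     * FAKE_ADDRESS (0x10000000) lies outside every PMP region configured
+     * above, so the store below is guaranteed to fault; handle_trap()
+     * recognizes mtval == FAKE_ADDRESS and finishes the test through
+     * checkTestResult() instead of resuming execution.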
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..c33238f5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
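+ * The handler distinguishes three cases: an epc inside TEST_MEM means an
+ * instruction fetch was blocked (x fail); an mtval inside TEST_MEM means a
+ * load/store was blocked (rw fail), which is skipped over; and an mtval of
+ * FAKE_ADDRESS is the deliberate exit store that reports the results.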
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
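+ * This variant deliberately shifts the TOR base off TEST_MEM_START: a
+ * 256-byte offset is requested, and mismatch_addr_offset() rounds it up to
+ * the detected grain by doubling (a 4-byte grain yields 256, a 512-byte
+ * grain yields 512), keeping pmpaddr1 grain-aligned.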
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
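+ * This variant also sets MSECCFG_MML. Under the Smepmp rules, once MML is
+ * set a PMP entry without the L bit is a U-mode-only rule, which is why
+ * set_cfg() above puts PMP_L on the entries guarding the M-mode code that
+ * handles traps.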
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..c609d1be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
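+ * The skip is implemented as a return of epc + 4, which assumes the
+ * faulting load/store is a 4-byte (uncompressed) instruction; that holds
+ * as long as those accesses are not compiled to compressed forms.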
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
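+ * detect_pmp_granularity() discovers the grain the standard way: it
+ * writes all-ones to pmpaddr0 and reads it back. With a grain of 2^(G+2)
+ * bytes the low G bits read back as zero, so granule = 1 << (2 +
+ * trailing zeros of the readback); a readback ending in ...100, say, has
+ * two trailing zeros and means a 16-byte grain.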
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
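+ * checkTestResult() below encodes the outcome in the exit code: bit 0 is
+ * set when the read/write result differs from expectation and bit 1 when
+ * the fetch result differs, so a clean pass exits with 0.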
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..c816efbd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
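+ * This variant runs with MSECCFG_MMWP (Machine Mode Whitelist Policy)
+ * set: M-mode accesses that match no PMP entry are denied instead of
+ * being allowed by default, so the handler below must itself be covered
+ * by a PMP rule.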
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
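+ * The pmpaddr CSRs hold physical-address bits 2 and up, hence every value
+ * written below is shifted right by 2 (e.g. TEST_MEM_END 0x240000 becomes
+ * 0x90000). With A=TOR, entry i matches addresses in the half-open range
+ * [pmpaddr(i-1), pmpaddr(i)), so the OFF entry at pmpaddr1 only supplies
+ * the lower bound of the TEST_MEM rule in entry 2.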
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..8464a098 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
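+ * This variant sets M_MODE_RWX, so the whole space below TEST_MEM_START
+ * is covered by one NAPOT entry instead: pmpaddr0 = (0x200000 >> 3) - 1 =
+ * 0x3ffff has 18 trailing one bits, which NAPOT decodes as a region of
+ * 2^(18+3) = 0x200000 bytes starting at address 0.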
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
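+ * In this configuration pmp2cfg grants X but not R/W over TEST_MEM, so
+ * the U-mode store to target_arr above traps (expected_rw_fail = 1) while
+ * the fetch of target_foo_umode() succeeds (expected_x_fail = 0).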
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..8477da4d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
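+ * The csrs write of MSECCFG_RLB at the top of this function (0x747 is the
+ * mseccfg CSR number) enables Rule Locking Bypass, so entries that carry
+ * the L bit can still be rewritten while the test sets itself up.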
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..5b63fcd8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
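+ * (The mode-switch helper further below, switch_to_U(), clears
+ * mstatus.MPP, points mepc at try_access_umode, sets the U-mode stack to
+ * U_MEM_END, and issues mret.)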
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
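+ * Each pmpNcfg is one byte of a pmpcfg CSR: the (PMP_R | PMP_W | PMP_X |
+ * PMP_TOR) << 24 below lands in pmp3cfg. On RV64 the cfg1 bytes are
+ * merged into the upper half of pmpcfg0 (entries 4-7), while on RV32
+ * they go to the separate pmpcfg1 register.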
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..9a591e80 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
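+ * The sections referenced below (.text_umode, .text_test_foo,
+ * .data_test_arr) are presumably placed by the accompanying link script
+ * inside the PMP-controlled windows (TEST_MEM and U_MEM); the C code
+ * relies only on those section names.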
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
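+ * handle_trap() catches that store and calls checkTestResult(), which
+ * folds the comparison into the exit code: bit 0 flags an R/W
+ * expectation mismatch and bit 1 an X mismatch, so exit(0) means the
+ * test behaved exactly as generated.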
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..8a4c8c5a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
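+ * On a recognized fault the handler simply resumes execution past the
+ * trapping instruction. A sketch of that skip (skip_insn() is
+ * illustrative only, not part of the generated file):
+ */
+
+static inline uintptr_t skip_insn(uintptr_t epc) {
+  /* like the "return epc + 4" in handle_trap() below, this assumes the
+   * trapping instruction is a full 4-byte one (no RVC at the fault site) */
+  return epc + 4;
+}
+
+/*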
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
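+ * (U mode was entered by the mirror step: switch_to_U() clears
+ * mstatus.MPP and executes mret with mepc aimed at this function, so a
+ * deliberate fault is the route back into the M-mode harness.)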
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..31e57fbc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..2d06aecc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
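+ * For load/store access faults the handler reads the faulting address
+ * from mtval. A sketch of that read (read_mtval() is illustrative only,
+ * not part of the generated file):
+ */
+
+static inline reg_t read_mtval(void) {
+  reg_t v;
+  /* mtval holds the faulting data address on access faults */
+  asm volatile ("csrr %0, mtval" : "=r"(v));
+  return v;
+}
+
+/*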
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..d9376f43 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..d7ccc12e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..0cb6a30c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_rw_fail != actual_rw_fail) {
+ ret += 1;
+ }
+
+ if (expected_x_fail != actual_x_fail) {
+ ret += 2;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // running in M mode at this point
+ set_cfg();
+
+ switch_mode(); // if switched to U mode, this branches directly to try_access_umode
+
+ try_access();
+
+ checkTestResult();
+ return 0; // not reached; checkTestResult() calls exit()
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c
new file mode 100644
index 00000000..90a91e6d
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode;
+ * that makes it easier to handle PMP exceptions during the test.
+ *
+ * Remarks:
+ * - RW=01 not covered. U/M mode sharing will be tested separately
+ * - RLB is always 0. CSR access control will be tested separately
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, need to separate R and W combinations.
+ * Skip RW=01 (share mode) at the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 0;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override the trap handler from syscalls.c;
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
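+ *
+ * For reference: pmpcfg0 packs one 8-bit pmpNcfg field per entry (eight
+ * entries on RV64), so in the code below << 8 addresses pmp1cfg, << 16
+ * pmp2cfg and << 24 pmp3cfg, while on RV32 entries 4..7 live in pmpcfg1.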
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
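+ *
+ * Unlike the TEST_MEM data faults, where handle_trap() resumes at
+ * epc + 4, this fault is terminal: handle_trap() calls checkTestResult(),
+ * which exits through the HTIF rather than returning here.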
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..ae51b319 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
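+ *
+ * The raw CSR number 0x747 used below is mseccfg; the literal is
+ * presumably used because older assemblers do not yet know the Smepmp
+ * CSR names. csrs only sets the requested RLB/MML/MMWP bits, leaving
+ * the rest of mseccfg untouched.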
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..282e459e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
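+ *
+ * Worked example for the NAPOT entry this M_MODE_RWX variant uses:
+ * pmpaddr0 = (TEST_MEM_START >> 3) - 1 = 0x3ffff has 18 trailing ones,
+ * so with PMP_NAPOT it selects a 2^(18+3) = 0x200000-byte region based
+ * at 0, i.e. exactly [0x0, TEST_MEM_START) for M-mode code and data.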
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..cba1b922 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
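+ *
+ * detect_pmp_granularity() above uses the standard probe: write all ones
+ * to pmpaddr0, read it back, and if bit G is the lowest bit still set,
+ * the granularity is 2^(G+2) bytes (g starts at 2 to account for the
+ * >> 2 address encoding). With a 4-byte granularity, Spike's default
+ * unless configured otherwise, bit 0 survives and granule = 4.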
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..6882bbf1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
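+ *
+ * This variant also sets MMWP below: with the machine-mode whitelist
+ * policy in effect, M-mode accesses that match no PMP entry are denied
+ * instead of being allowed by default, so even the M-mode test path
+ * depends on the entries programmed here.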
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..0f18a9c9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
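+ *
+ * This variant also sets MML below: under machine-mode lockdown a rule
+ * without PMP_L applies to U mode only, while a PMP_L rule becomes an
+ * M-mode rule, which is why the M-mode code/data entries get their
+ * L bits set before mseccfg.MML takes effect.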
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..729cd357 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
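+ *
+ * Judging from the generated variants, the _match0_ outputs start with
+ * mismatch_offset = 256 so that pmpaddr1 is bumped past TEST_MEM_START
+ * and accesses at the bottom of TEST_MEM fall outside the TOR window;
+ * this _match1_ output keeps the offset at 0 so the window lines up
+ * exactly with TEST_MEM.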
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+    /*
+     * Set MSECCFG_RLB first so the writes below are not rejected by locked entries.
+     */
+    unsigned rlb_value = MSECCFG_RLB;
+    asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+    /*
+     * Set pmp0cfg for M mode (M_MEM) and pmp1cfg as the base of a TOR pair,
+     * then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+     * pmp3cfg covers the fixed U-mode region (U_MEM).
+     *
+     * Here @pmp_addr_offset:int@ creates an address mismatch, and
+     * @create_pmp_cfg:int@ creates a cfg mismatch.
+     */
+
+    unsigned int mismatch_offset = 0;
+
+    if (mismatch_offset != 0x0){
+        volatile int pmp_granularity = detect_pmp_granularity();
+        mismatch_offset = mismatch_addr_offset(pmp_granularity);
+    }
+
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");  // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");      // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");      // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    // Only true for Spike
+//  asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+//  if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+//      exit(cfg0);
+//  }
+
+    if (0) {    // set the L bit for M-mode code such as the trap handler
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24;    // for U_MEM
+#if 1
+    cfg0 |= ( (1 ? PMP_R : 0)
+            | (0 ? PMP_W : 0)
+            | (0 ? PMP_X : 0)
+            | PMP_TOR | (0 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                :
+                : "r"(cfg1)
+                : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                :
+                : "r"(cfg0)
+                : "memory");
+
+    // set proc->state.mseccfg, for MML/MMWP
+    const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0);
+    if (seccfg_bits) {
+        asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+    }
+
+    // currently a no-op on Spike: the TLB is flushed when mseccfg is written
+    asm volatile ("fence.i \n");
+}
+
+// From the pmp_ok() perspective, W/R/X behave similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+    target_arr[0] += 1;
+    const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+    *(long *)target_arr += delta;
+
+    /* initial little-endian value 0x0807060504030201, plus the byte increment and delta */
+    if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+        actual_rw_fail = 1;
+    }
+#endif
+
+#if TEST_FETCH
+    actual_x_fail = 1;  // reset inside target_foo()
+    target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+    target_arr[0] += 1;
+//  const unsigned long delta = 0x1020304005060708UL;
+//  *(long *)target_arr += delta;
+
+//  if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+//      actual_rw_fail = 1;
+//  }
+#endif
+
+#if TEST_FETCH
+    target_foo_umode();
+#endif
+
+    /*
+     * Switch back to M mode by triggering a write access fault on a special address.
+     */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
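+
+/*
+ * 0x747 in the csrs sequences above is the CSR number of mseccfg
+ * (Smepmp); older assemblers lack a mnemonic for it, hence the raw
+ * address. Illustrative wrapper only (not part of the generated test):
+ */
+static inline void mseccfg_set_bits(reg_t bits) {
+    asm volatile ("csrs 0x747, %0" :: "r"(bits) : "memory");
+}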
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_rw_fail != actual_rw_fail) {
+        ret += 1;
+    }
+
+    if (expected_x_fail != actual_x_fail) {
+        ret += 2;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // entered in M mode
+    set_cfg();
+
+    switch_mode();  // when switching to U mode, control branches directly to try_access_umode
+
+    try_access();
+
+    checkTestResult();
+    return 0;       // expected exit code is 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c
new file mode 100644
index 00000000..65bc8335
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode,
+ * which makes it easier to handle the PMP exceptions the test raises.
+ *
+ * Remarks:
+ * - RW=01 is not covered; U/M mode sharing is tested separately.
+ * - RLB is always 0; CSR access control is tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, the R and W combinations need to be separated.
+ * RW=01 (share mode) is skipped on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW    1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS  0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END   0x240000
+#define U_MEM_END      (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS   0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
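+
+/*
+ * checkTestResult() (declared above, defined at the end of this file)
+ * encodes the outcome as a bitmask exit code: bit 0 is set on a
+ * load/store mismatch, bit 1 on a fetch mismatch, and 0 means pass.
+ * Illustrative named constants (the generator emits bare numbers):
+ */
+enum test_exit_code {
+    TEST_PASS        = 0,
+    TEST_RW_MISMATCH = 1,
+    TEST_X_MISMATCH  = 2,
+};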
+
+/*
+ * Overrides handle_trap() from syscalls.c;
+ * currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+        asm volatile ("nop");
+        actual_x_fail = 1;
+        checkTestResult();
+    } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+        reg_t addr;
+        asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+        if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+            actual_rw_fail = 1;
+            return epc + 4;
+        }
+
+        if (addr == FAKE_ADDRESS) {
+            asm volatile ("nop");
+            asm volatile ("nop");
+            checkTestResult();
+        }
+    }
+
+    printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+    tohost_exit(1337);
+}
+
+// mret to U mode, resuming at try_access_umode
+static void switch_to_U() {
+    reg_t tmp;
+    asm volatile (
+        "li %0, %1\n"
+        "\tcsrc mstatus, %0\n"      /* clear MPP so mret returns to U mode */
+        "\tla %0, try_access_umode \n"
+        "\tcsrw mepc, %0\n"
+        "\tli sp, %2\n"
+        "\tmret\n"
+        : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if 1
+    switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+    asm volatile ("nop");
+
+    actual_x_fail = 0;
+}
+
+/*
+ * Kept free of accesses to actual_x_fail, which lives in M-mode memory.
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * Write all-ones to pmpaddr0 and read it back; the hardwired-zero low
+ * bits reveal the PMP granularity (granule = 2^(2 + trailing zeros)).
+ */
+static int detect_pmp_granularity(){
+    unsigned int granule;
+    unsigned long int temp_reg;
+    unsigned long int all_ones = ~0x0UL;
+
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+    asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+    int g = 2;
+    for(uintptr_t i = 1; i; i<<=1) {
+        if((temp_reg & i) != 0)
+            break;
+        g++;
+    }
+    granule = 1UL << g;
+
+    return granule;
+}
+
+static int mismatch_addr_offset(int granule_size){
+    unsigned int addr_offset = 0;
+
+    if (addr_offset == 0x0){
+        return 0x0;
+    }
+    else {
+        unsigned int mismatch_offset = granule_size;
+        while (mismatch_offset < addr_offset){
+            mismatch_offset = mismatch_offset << 0x1;
+        }
+        return mismatch_offset;
+    }
+}
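+
+/*
+ * In the #if M_MODE_RWX branch of set_cfg() below (not compiled in this
+ * variant), pmpaddr0 = (TEST_MEM_START >> 3) - 1 is the NAPOT encoding of
+ * the region [0, TEST_MEM_START). Illustrative helper, assuming size is a
+ * power of two (>= 8) and base is size-aligned:
+ */
+static inline reg_t napot_pmpaddr(reg_t base, reg_t size) {
+    /* (base | (size/2 - 1)) >> 2; for base 0 this reduces to (size >> 3) - 1 */
+    return (base | (size / 2 - 1)) >> 2;
+}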
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+    /*
+     * Set MSECCFG_RLB first so the writes below are not rejected by locked entries.
+     */
+    unsigned rlb_value = MSECCFG_RLB;
+    asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+    /*
+     * Set pmp0cfg for M mode (M_MEM) and pmp1cfg as the base of a TOR pair,
+     * then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+     * pmp3cfg covers the fixed U-mode region (U_MEM).
+     *
+     * Here @pmp_addr_offset:int@ creates an address mismatch, and
+     * @create_pmp_cfg:int@ creates a cfg mismatch.
+     */
+
+    unsigned int mismatch_offset = 0;
+
+    if (mismatch_offset != 0x0){
+        volatile int pmp_granularity = detect_pmp_granularity();
+        mismatch_offset = mismatch_addr_offset(pmp_granularity);
+    }
+
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");  // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");      // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");      // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    // Only true for Spike
+//  asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+//  if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+//      exit(cfg0);
+//  }
+
+    if (1) {    // set the L bit for M-mode code such as the trap handler
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24;    // for U_MEM
+#if 1
+    cfg0 |= ( (1 ? PMP_R : 0)
+            | (0 ? PMP_W : 0)
+            | (0 ? PMP_X : 0)
+            | PMP_TOR | (0 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                :
+                : "r"(cfg1)
+                : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                :
+                : "r"(cfg0)
+                : "memory");
+
+    // set proc->state.mseccfg, for MML/MMWP
+    const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0);
+    if (seccfg_bits) {
+        asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+    }
+
+    // currently a no-op on Spike: the TLB is flushed when mseccfg is written
+    asm volatile ("fence.i \n");
+}
+
+// From the pmp_ok() perspective, W/R/X behave similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+    target_arr[0] += 1;
+    const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+    *(long *)target_arr += delta;
+
+    /* initial little-endian value 0x0807060504030201, plus the byte increment and delta */
+    if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+        actual_rw_fail = 1;
+    }
+#endif
+
+#if TEST_FETCH
+    actual_x_fail = 1;  // reset inside target_foo()
+    target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+    target_arr[0] += 1;
+//  const unsigned long delta = 0x1020304005060708UL;
+//  *(long *)target_arr += delta;
+
+//  if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+//      actual_rw_fail = 1;
+//  }
+#endif
+
+#if TEST_FETCH
+    target_foo_umode();
+#endif
+
+    /*
+     * Switch back to M mode by triggering a write access fault on a special address.
+     */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
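+
+/*
+ * This mml1 variant sets MSECCFG_MML, so set_cfg() above took the
+ * "if (1)" branch and locked the M-mode code/data entries: under MML,
+ * PMP rules with the L bit set are (broadly) the M-mode rules, while
+ * unlocked rules apply to U mode. Illustrative predicate only:
+ */
+static inline int is_m_mode_rule(reg_t cfg) {
+    return (cfg & PMP_L) != 0;
+}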
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_rw_fail != actual_rw_fail) {
+        ret += 1;
+    }
+
+    if (expected_x_fail != actual_x_fail) {
+        ret += 2;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // entered in M mode
+    set_cfg();
+
+    switch_mode();  // when switching to U mode, control branches directly to try_access_umode
+
+    try_access();
+
+    checkTestResult();
+    return 0;       // expected exit code is 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c
new file mode 100644
index 00000000..8741c283
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode,
+ * which makes it easier to handle the PMP exceptions the test raises.
+ *
+ * Remarks:
+ * - RW=01 is not covered; U/M mode sharing is tested separately.
+ * - RLB is always 0; CSR access control is tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, the R and W combinations need to be separated.
+ * RW=01 (share mode) is skipped on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW    1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set this must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 1
+
+#define CAUSE_LOAD_ACCESS  0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END   0x240000
+#define U_MEM_END      (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS   0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..e05bfe67 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..6894def9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..cfd73713 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..117b1ff0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..4ac00d2c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
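/*
 * The pmpaddr writes in set_cfg() above build ToR (top-of-range)
 * regions: entry i with PMP_TOR matches pmpaddr[i-1]*4 <= addr <
 * pmpaddr[i]*4, with each pmpaddr CSR holding addr >> 2. Host-runnable
 * model of that rule, checked against the TEST_MEM bounds; the helper
 * name is an assumption:
 */
#include <assert.h>

static int tor_match(unsigned long addr,
                     unsigned long pmpaddr_lo,   /* pmpaddr[i-1] (addr >> 2) */
                     unsigned long pmpaddr_hi)   /* pmpaddr[i]   (addr >> 2) */
{
    return (pmpaddr_lo << 2) <= addr && addr < (pmpaddr_hi << 2);
}

int main(void) {
    assert(tor_match(0x200000, 0x200000 >> 2, 0x240000 >> 2));   /* first byte in    */
    assert(!tor_match(0x240000, 0x200000 >> 2, 0x240000 >> 2));  /* end is exclusive */
    return 0;
}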
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..5ef8ce29 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
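/*
 * The generated file names encode the test configuration; each field
 * maps onto one knob in the skeleton (the mismatch_offset value, the
 * if(0)/if(1) L-bit block, the #if around the << 16 cfg byte, and the
 * seccfg_bits expression). A hypothetical decoder type, filled in for
 * the file added just above:
 */
struct pmp_test_cfg {
    int u_mode;      /* u1: the access is attempted from U-mode            */
    int r, w, x;     /* rw10_x0: R/W/X bits on the TEST_MEM entry          */
    int lock;        /* l1: PMP_L set on the TEST_MEM entry                */
    int addr_match;  /* match1: pmpaddr1 covers the region; match0 variants */
                     /* add a granule-aligned offset to force a mismatch    */
    int mmwp, mml;   /* mseccfg.MMWP / mseccfg.MML                          */
};

static const struct pmp_test_cfg cfg_u1_rw10_x0_l1_match1_mmwp0_mml0 = {
    .u_mode = 1, .r = 1, .w = 0, .x = 0,
    .lock = 1, .addr_match = 1, .mmwp = 0, .mml = 0,
};

int main(void) {
    const struct pmp_test_cfg *c = &cfg_u1_rw10_x0_l1_match1_mmwp0_mml0;
    return !(c->lock == 1 && c->addr_match == 1 && c->mml == 0);
}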
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
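/*
 * The #if 1 block in set_cfg() above is the skeleton's
 * @create_pmp_cfg@ knob: when enabled it installs the TEST_MEM entry
 * (pmp2cfg, the byte at << 16) with exactly the R/W/X/L bits named in
 * the file name; when compiled out, nothing matches TEST_MEM and the
 * default rule decides (deny under MMWP, full M-mode access otherwise).
 * Hypothetical helper making the byte explicit, using the PMP_* values
 * defined in these files:
 */
#include <assert.h>

static unsigned long test_mem_cfg_byte(int r, int w, int x, int l) {
    unsigned long byte = (r ? 0x01 : 0) | (w ? 0x02 : 0)
                       | (x ? 0x04 : 0) | 0x08 /* PMP_TOR */
                       | (l ? 0x80 : 0);
    return byte << 16;  /* pmp2cfg lives in byte 2 of pmpcfg0 */
}

int main(void) {
    /* rw10_x0_l1: R=1 W=0 X=0 L=1 -> 0x89 in byte 2 */
    assert(test_mem_cfg_byte(1, 0, 0, 1) == (0x89UL << 16));
    return 0;
}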
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..44585be0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
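/*
 * How the cfg0/cfg1 values built in set_cfg() land in the CSRs: every
 * PMP entry owns one byte, and pmpcfg0 covers entries 0..7 on RV64 but
 * only 0..3 on RV32, where entries 4..7 live in pmpcfg1. That is all
 * the `cfg0 |= (cfg1 << 32)` versus `csrw pmpcfg1` split is doing.
 * Minimal host-side model; the helper name is an assumption:
 */
#include <assert.h>

typedef unsigned long long u64;

static u64 pack_pmpcfg(u64 cfg, int entry, unsigned char flags) {
    return cfg | ((u64)flags << (8 * entry));
}

int main(void) {
    /* entry 3 with R|W|X|TOR: same value as the << 24 line in set_cfg() */
    assert(pack_pmpcfg(0, 3, 0x01 | 0x02 | 0x04 | 0x08) == ((u64)0x0f << 24));
    return 0;
}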
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..f8f46f88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
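/*
 * The (TEST_MEM_START >> 3) - 1 written to pmpaddr0 in the M_MODE_RWX
 * builds is the NAPOT encoding of the region [0, TEST_MEM_START): a
 * naturally aligned power-of-two region [base, base+size) is encoded
 * as (base | (size/2 - 1)) >> 2. Host-runnable model, name assumed:
 */
#include <assert.h>

static unsigned long napot_encode(unsigned long base, unsigned long size) {
    return (base | (size / 2 - 1)) >> 2;
}

int main(void) {
    assert(napot_encode(0, 0x200000) == (0x200000UL >> 3) - 1);
    return 0;
}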
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..e97bd91f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
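/*
 * Why this mml1 variant needs PMP_L on the trap-handler entries: with
 * mseccfg.MML set, a deliberately simplified reading of the Smepmp
 * rules is that a locked entry (L=1) grants its R/W/X bits to M-mode
 * only and an unlocked entry (L=0) to U-mode only. The sketch below
 * models only that simplification and ignores the special encodings
 * (the RW=01 shared regions this generator skips, and L=1 RWX=111):
 */
static int mml_allows(int m_mode, int entry_l, int entry_perm, int req_perm) {
    if (m_mode != entry_l)
        return 0;  /* entry belongs to the other privilege level */
    return (entry_perm & req_perm) == req_perm;
}

int main(void) {
    /* M-mode read via a locked R entry: allowed; same entry from U: denied */
    return !(mml_allows(1, 1, 0x01, 0x01) && !mml_allows(0, 1, 0x01, 0x01));
}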
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..e5393dee --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
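/*
 * The raw "csrs 0x747" in set_cfg() targets the mseccfg CSR, written
 * by number presumably so assemblers without Smepmp support still
 * accept it. RLB (rule-locking bypass) is set first so locked entries
 * can be rewritten while the test reconfigures PMP. RISC-V-only
 * sketch; the helper names are assumptions:
 */
static inline void mseccfg_set_bits(unsigned long bits) {
    asm volatile ("csrs 0x747, %0" :: "r"(bits) : "memory");
}

static inline void mseccfg_clear_bits(unsigned long bits) {
    asm volatile ("csrc 0x747, %0" :: "r"(bits) : "memory");
}

static inline unsigned long mseccfg_get(void) {
    unsigned long v;
    asm volatile ("csrr %0, 0x747" : "=r"(v));
    return v;
}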
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..81041eba --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+     */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_rw_fail != actual_rw_fail) {
+        ret += 1;
+    }
+
+    if (expected_x_fail != actual_x_fail) {
+        ret += 2;
+    }
+
+    exit(ret);
+}
+
+int main() {
+    // runs in M mode
+    set_cfg();
+
+    switch_mode();    // if we switch to U mode, this branches straight to try_access_umode
+
+    try_access();
+
+    checkTestResult();
+    return 0;    // unreachable: checkTestResult() always exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c
new file mode 100644
index 00000000..fd2447af
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode;
+ * that makes it easier to handle PMP exceptions during the test.
+ *
+ * Remarks:
+ * - RW=01 not covered. U/M mode sharing will be tested separately.
+ * - RLB is always 0. CSR access control will be tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ *   For RWXL + MML, R and W combinations need to be separated.
+ *   Skip RW=01 (share mode) on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW    1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, this must be 0, otherwise an unexpected
+ * exception occurs.
+ */
+#define M_MODE_RWX 1
+
+#define CAUSE_LOAD_ACCESS  0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END   0x240000
+#define U_MEM_END      (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS   0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
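+ * (The handler returns the new mepc. Note that the fixed "epc + 4" skip
+ * below assumes the faulting load/store is a 4-byte instruction; built
+ * with the C extension, a 2-byte compressed access would make the resume
+ * point land mid-instruction.)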
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
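+ *
+ * For reference (privileged-spec math, not part of the template): the
+ * M_MODE_RWX path below uses a NAPOT entry, which encodes
+ *     pmpaddr = (base >> 2) | (size/8 - 1)   for a power-of-two size >= 8.
+ * Worked example for the value written to pmpaddr0:
+ *     (TEST_MEM_START >> 3) - 1 = (0x200000 >> 3) - 1 = 0x3ffff,
+ * i.e. base 0 with the low 18 bits set: a 2^(18+3) = 0x200000-byte
+ * region, exactly [0, TEST_MEM_START), the M-mode code and data below
+ * the test window.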
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..b5272cf6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
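+ * (A note on this mmwp1_mml1 variant: once MSECCFG_MML is set, M-mode
+ * code is only executable through a *locked* PMP rule with X permission,
+ * so the trap handler itself must sit inside such a rule; that is why
+ * set_cfg() below ORs PMP_L into the M-mode code/data entries.)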
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
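+ *
+ * For reference: per Smepmp, MML also forbids rules that leave M mode
+ * with both write and execute permission on one region, which is why
+ * the MML variants are generated with M_MODE_RWX = 0: instead of one
+ * RWX NAPOT entry, M-mode memory is split into a locked execute-only
+ * TOR entry for code and a locked read/write TOR entry for data
+ * (entries 5 and 6 below).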
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..85aa3fe8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
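+ *
+ * For reference, judging from the generated variants: "match1" in the
+ * file name means the address matches (mismatch_offset stays 0, so
+ * entry 2 covers TEST_MEM exactly) and the permission bits decide the
+ * outcome. The "#if 1" block below grants entry 2 only PMP_R | PMP_X,
+ * no PMP_W, matching the expectations at the top of this file: stores
+ * to target_arr fault (expected_rw_fail = 1) while fetching from
+ * TEST_MEM succeeds (expected_x_fail = 0).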
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
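+ * (Stack note: switch_to_U() pointed sp at U_MEM_END before mret; the
+ * stack grows down into the U_MEM TOR entry (pmpaddr3, configured
+ * R/W/X), so ordinary stack traffic in this U-mode function does not
+ * itself fault.)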
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..f8fb3c68 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
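+ *
+ * For reference: 0x747 in the csrs instructions here is the CSR number
+ * of mseccfg from the Smepmp extension, written numerically presumably
+ * because older assemblers do not know it by name. csrs sets bits
+ * without clearing the rest, so the RLB bit set at the top of this
+ * function stays set when the MML/MMWP bits are added at the end.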
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..cd12b79d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
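+ *
+ * For reference: detect_pmp_granularity() above is the probing sequence
+ * from the privileged spec: write all-ones to a pmpaddr CSR, read it
+ * back, and locate the lowest set bit; a lowest set bit at position k
+ * means a granularity of 2^(k+2) bytes (k = 0 gives the minimum 4-byte
+ * granule). mismatch_addr_offset() then rounds the requested offset up
+ * to that granule by repeated doubling. In this variant mismatch_offset
+ * starts at 0, so neither helper actually runs.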
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..4f89f802 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..3fdda498 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
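+ *
+ * For reference: this is an "l1" variant, so the "#if 1" block below
+ * also sets PMP_L on the TEST_MEM entry. With MML clear, a locked entry
+ * does two things per the privileged spec: it ignores further writes
+ * until reset (which is why RLB is set above, bypassing the lock), and
+ * its R/W/X permissions are enforced for M mode too, not only for U
+ * mode.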
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
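+ * FAKE_ADDRESS lies outside every PMP range configured above, so the
+ * store below must fault in U mode; handle_trap() spots
+ * mtval == FAKE_ADDRESS and finishes the test from M mode.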
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..475246bb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
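+ * The handler reads the faulting data address from mtval; that is
+ * reliable on Spike, though the ISA also permits a hart to report
+ * zero there.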
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
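+ *
+ * This variant runs with MML set, so M_MODE_RWX is 0: instead of one
+ * shared RWX entry, M mode code and data get separate locked TOR
+ * entries through pmpaddr4..pmpaddr6 and cfg1.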
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
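+ * Because the fault hands control to checkTestResult() in M mode,
+ * execution never returns to U mode after this point.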
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..dee02396 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
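+ * Any trap outside the two expected cases falls through to the
+ * printf() + tohost_exit(1337) path at the bottom of the handler.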
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
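+ *
+ * The PMP granularity may be coarser than the 256-byte offset used
+ * here, so mismatch_addr_offset() doubles the probed granule until it
+ * reaches at least that offset.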
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..8de615d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
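+ * checkTestResult() encodes any mismatch in the exit code: bit 0 for
+ * the read/write expectation, bit 1 for the execute expectation, so
+ * exit code 0 means pass.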
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
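+ *
+ * pmpcfg0 packs one 8-bit configuration per entry (entry N sits in
+ * bits 8*N..8*N+7); on RV64 entries 4..7 occupy the upper 32 bits,
+ * which is why cfg1 is merged in via "cfg0 |= (cfg1 << 32)" below.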
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..a596e329 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
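+ *
+ * mseccfg is accessed by CSR number (0x747) rather than by name,
+ * presumably so the test still assembles with toolchains that predate
+ * the Smepmp CSR names.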
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..d5c24006 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
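+ *
+ * Entry 3 (the "<< 24" byte below) keeps [TEST_MEM_END, U_MEM_END)
+ * RWX so that U mode still has a usable stack after mret
+ * (switch_to_U() points sp at U_MEM_END).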
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..b9b4f3c1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
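+ *
+ * With M_MODE_RWX == 1, entry 0 is a NAPOT region for M mode memory:
+ * (TEST_MEM_START >> 3) - 1 = 0x3ffff is an all-ones pattern, which
+ * NAPOT decodes as the range [0, TEST_MEM_START).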
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..7aba47e2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+/*
+ * Overrides the handler in syscalls.c.
+ * Currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+ asm volatile ("nop");
+ actual_x_fail = 1;
+ checkTestResult();
+ } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+ reg_t addr;
+ asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+ if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+ actual_rw_fail = 1;
+ return epc + 4;
+ }
+
+ if (addr == FAKE_ADDRESS) {
+ asm volatile ("nop");
+ asm volatile ("nop");
+ checkTestResult();
+ }
+ }
+
+ printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+ tohost_exit(1337);
+}
+
+// switch (via mret) to U mode and resume at the next PC
+static void switch_to_U() {
+ reg_t tmp;
+ asm volatile (
+ "li %0, %1\n"
+ "\tcsrc mstatus, %0\n" /* clear MPP so mret drops to U mode */
+ "\tla %0, try_access_umode \n"
+ "\tcsrw mepc, %0\n"
+ "\tli sp, %2\n"
+ "\tmret\n"
+ : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if 1
+ switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+ asm volatile ("nop");
+
+ actual_x_fail = 0;
+}
+
+/*
+ * Avoid accessing actual_x_fail, which lies in M-mode memory.
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+// write all ones to pmpaddr0 and read it back; the number of trailing
+// zero bits gives G, and the granule is 2^(G+2) bytes
+static int detect_pmp_granularity(){
+ unsigned int granule;
+ unsigned long int temp_reg;
+ unsigned long int all_ones = ~0x0UL;
+
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+ asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+ int g = 2;
+ for(uintptr_t i = 1; i; i<<=1) {
+ if((temp_reg & i) != 0)
+ break;
+ g++;
+ }
+ granule = 1UL << g;
+
+ return granule;
+}
+
+static int mismatch_addr_offset(int granule_size){
+ unsigned int addr_offset = 0;
+
+ if (addr_offset == 0x0){
+ return 0x0;
+ }
+ else {
+ unsigned int mismatch_offset = granule_size;
+ while (mismatch_offset < addr_offset){
+ mismatch_offset = mismatch_offset << 0x1;
+ }
+ return mismatch_offset;
+ }
+}
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+ /*
+ * Set MSECCFG_RLB so that locked entries can still be reconfigured.
+ */
+ unsigned rlb_value = MSECCFG_RLB;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+ /*
+ * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+ * Then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+ * Also use pmp3cfg for fixed U mode (U_MEM).
+ *
+ * Here @pmp_addr_offset:int@ creates an address mismatch
+ * and @create_pmp_cfg:int@ creates a cfg mismatch.
+ */
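+ /*
+ * Illustrative encoding (using the constants above): pmpaddrN holds a
+ * physical address shifted right by PMP_SHIFT (2), so TEST_MEM_END =
+ * 0x240000 is written as 0x90000, and a TOR entry i matches
+ * pmpaddr[i-1] <= addr < pmpaddr[i].
+ */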
+
+ unsigned int mismatch_offset = 0;
+
+ if (mismatch_offset != 0x0){
+ volatile int pmp_granularity = detect_pmp_granularity();
+ mismatch_offset = mismatch_addr_offset(pmp_granularity);
+ }
+
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ // Sanity check; the reset value below holds only on Spike
+// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+// exit(cfg0);
+// }
+
+ if (1) { // set the L bit for M-mode code such as trap handling
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM
+#if 1
+ cfg0 |= ( (1 ? PMP_R : 0)
+ | (0 ? PMP_W : 0)
+ | (1 ? PMP_X : 0)
+ | PMP_TOR | (1 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // set proc->state.mseccfg for MML/MMWP
+ const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0);
+ if (seccfg_bits) {
+ asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+ }
+
+ // currently a no-op, since the TLB is flushed when set_csr() writes mseccfg
+ asm volatile ("fence.i \n");
+}
+
+// from the pmp_ok() perspective, W, R and X behave similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_rw_fail = 1;
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // cleared inside target_foo() if the fetch succeeds
+ target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * Switch back to M mode by triggering a write access fault on a special address.
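+ * FAKE_ADDRESS (0x10000000) is covered by no PMP entry configured above,
+ * so the U-mode store below faults; handle_trap() then sees
+ * mtval == FAKE_ADDRESS and finishes the run via checkTestResult().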
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..904fc44d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
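+ *
+ * Note: when the offset below stays zero, pmpaddr1 is written as exactly
+ * TEST_MEM_START >> 2, so the TOR base lines up with the test region;
+ * a nonzero offset would shift the base upward to force an address
+ * mismatch for accesses near TEST_MEM_START.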
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..eb2c2fcd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
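+ *
+ * In this variant the requested offset is 256 bytes:
+ * mismatch_addr_offset() starts from the granule reported by
+ * detect_pmp_granularity() and doubles it until it is at least that
+ * large, so the mismatch stays granule-aligned.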
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..ae5fbd5b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
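+ *
+ * With MMWP (machine-mode whitelist policy) enabled at the end of this
+ * function, M-mode accesses that match no PMP entry are denied rather
+ * than allowed, which is why M-mode code and data need entries too.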
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..0fa4d95a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
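+ *
+ * The literal 0x747 in the csrs instructions here is the CSR address of
+ * mseccfg; it is written numerically, presumably so the file assembles
+ * even with toolchains that do not know that CSR by name.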
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..034c0637 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
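+ *
+ * This variant builds with M_MODE_RWX = 1, so pmp0cfg becomes a single
+ * RWX NAPOT entry covering [0, TEST_MEM_START):
+ * (TEST_MEM_START >> 3) - 1 is the NAPOT encoding of that naturally
+ * aligned region.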
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..218a65d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
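+ *
+ * Register packing: on RV64, pmpcfg0 holds the cfg bytes for entries
+ * 0-7, so cfg1 (entries 4-7) is merged in as the upper 32 bits; on
+ * RV32 it is written to pmpcfg1 instead.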
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..7b4228a0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
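+ *
+ * The handler returns the PC at which execution resumes. Faults inside
+ * TEST_MEM are recorded and stepped over with a hard-coded "epc + 4",
+ * which is only safe because these tests are presumably built without
+ * the C extension. A build with compressed instructions would have to
+ * derive the step, along these lines:
+ */
+
+/* Illustrative helper only; nothing in this generated test calls it. */
+static inline uintptr_t next_pc_after(uintptr_t epc)
+{
+    /* Encodings whose two low bits are not 0b11 are 16-bit (RVC) forms.
+     * Reading epc assumes the handler itself may load that address. */
+    unsigned short lo = *(volatile unsigned short *)epc;
+    return epc + (((lo & 0x3) == 0x3) ? 4 : 2);
+}
+
+/*
+ * As for the handler in this file, it will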
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
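+ *
+ * The M_MODE_RWX build uses NAPOT instead of TOR for pmp0. As a sketch
+ * of that encoding, a naturally aligned power-of-two region is written
+ * as
+ *
+ *   pmpaddr = (base >> 2) | ((size >> 3) - 1);   // size = 2^k, k >= 3
+ *
+ * so base 0 with size TEST_MEM_START is exactly the
+ * "(TEST_MEM_START >> 3) - 1" below: pmp0 grants M mode RWX over
+ * [0, TEST_MEM_START).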
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
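+ * (On the constants in try_access() above: target_arr starts as the
+ * bytes 1..8, which a little-endian load reads back as the long
+ * 0x0807060504030201; "target_arr[0] += 1" bumps the low byte, hence
+ * the "+ delta + 1" in the comparison. The same check stays commented
+ * out here, presumably to keep the U-mode copy minimal.)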
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..9595d009 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
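+ *
+ * How a run is judged (inferred from checkTestResult() below): the exit
+ * code is a bitmask, 0 on success, +1 when the R/W outcome differs from
+ * expected_rw_fail, +2 when the fetch outcome differs from
+ * expected_x_fail, and 1337 for any unexpected trap. The file name
+ * appears to encode the generator parameters (u/rw/x/l/match/mmwp/mml)
+ * that select the expected_* constants above.
+ *
+ * The handler itself will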
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
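+ *
+ * On the "csrs 0x747" lines below: 0x747 is mseccfg, written by number
+ * presumably because older assemblers do not know the name. Its low
+ * bits, per Smepmp:
+ *
+ *   MSECCFG_MML  (0x1)  machine-mode lockdown: locked entries become
+ *                       M-mode-only rules and M-mode fetches need a
+ *                       matching rule
+ *   MSECCFG_MMWP (0x2)  whitelist policy: M-mode accesses with no
+ *                       matching entry fault instead of succeeding
+ *   MSECCFG_RLB  (0x4)  rule-locking bypass, letting these tests keep
+ *                       rewriting otherwise locked entries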
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
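+ * (On the "printf cannot be used in U mode" note above: U mode may only
+ * touch addresses covered by a PMP entry that grants the permission,
+ * and the regular text holding printf is not exposed to U mode here.
+ * That is presumably why the U-mode code is carved into its own
+ * .text_umode / .text_test_foo sections, which the linker can place
+ * inside the regions pmp2/pmp3 open up to U mode.)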
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..7351d127 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
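+ *
+ * A note on detect_pmp_granularity() below: writing all-ones to a
+ * pmpaddr whose entry is OFF/TOR and reading it back exposes the grain,
+ * since an implementation with granularity 2^(G+2) bytes hardwires the
+ * low G bits of the register to zero. The loop counts those trailing
+ * zero bits, with the exponent starting at 2 to undo the ">> 2" address
+ * encoding; e.g. a read-back ending in ...111000 (G = 3) gives a
+ * granule of 1 << (2 + 3) = 32 bytes.
+ *
+ * The handler itself will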
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
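+ *
+ * Layout reminder for the cfg variables below: pmpcfg0 packs one byte
+ * per entry, pmp0cfg in bits 7:0 up to pmp7cfg in bits 63:56 on RV64,
+ * hence the << 8 / << 16 / << 24 shifts. cfg1 carries entries 4..7; on
+ * RV64 it is merged with "cfg0 |= cfg1 << 32" because odd-numbered
+ * pmpcfg registers exist only on RV32, where cfg1 goes to pmpcfg1
+ * instead.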
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
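+ * (Getting here at all also depends on switch_to_U() pointing sp at
+ * U_MEM_END before the mret: the boot stack presumably lives in memory
+ * U mode cannot touch, and a first spill through an inaccessible sp
+ * would fault and end the test early. Keeping this function and
+ * target_foo_umode() as small leaf routines limits that exposure.)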
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..fa45c2c7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
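+ *
+ * One subtlety worth flagging in switch_to_U() further below: the asm
+ * loads MSTATUS_MPP into whichever register the compiler picked for %0,
+ * then executes "csrc mstatus, t0", so it quietly relies on that
+ * register being t0. A variant that lets the constraint supply the
+ * register carries no such assumption (illustrative only, unused here):
+ */
+
+static inline void clear_mpp(void)
+{
+    reg_t mask = MSTATUS_MPP;
+    /* %0 is whatever register the compiler chose for mask */
+    asm volatile ("csrc mstatus, %0" : : "r"(mask) : "memory");
+}
+
+/*
+ * As for the trap handler, it will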
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
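+ * (The fetch check in try_access() above is a preset-and-clear
+ * protocol: actual_x_fail is set to 1 before the call, and only the
+ * body of target_foo() clears it. A fetch that never reaches
+ * target_foo() therefore leaves the failure recorded, and handle_trap()
+ * finishes the test instead of resuming inside code that was supposed
+ * to be unreachable.)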
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..186e3174 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
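+ *
+ * This variant requests a non-zero mismatch (256 in
+ * mismatch_addr_offset()), scaled up from the PMP granule, so pmpaddr1
+ * lands past TEST_MEM_START and the bottom of TEST_MEM falls outside
+ * the pmp2 TOR region: the "match0" case in the file name. The match1
+ * variants keep the base exact so the entry does match.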
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..d1d4cf2f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..94de37d3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
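+ *
+ * Why the L bits matter in this mml1 build: without mseccfg.MML an
+ * unlocked entry restricts U mode only and M mode sails through, while
+ * a locked (PMP_L) entry binds M mode as well; with MML set, M-mode
+ * fetches additionally need a matching locked rule. Locking the entries
+ * that cover the trap handler keeps M-mode execution legal here, at the
+ * price that only an RLB-enabled run can rewrite them afterwards.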
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+ /*
+ * Set MSECCFG_RLB (mseccfg is CSR 0x747) so locked entries can still be rewritten.
+ */
+ unsigned rlb_value = MSECCFG_RLB;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+ /*
+ * Set pmp0cfg for M mode (M_MEM) and pmp1cfg for the base of the TOR range.
+ * Then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+ * Also use pmp3cfg for fixed U-mode memory (U_MEM).
+ *
+ * Here @pmp_addr_offset:int@ creates an address mismatch,
+ * and @create_pmp_cfg:int@ creates a cfg mismatch.
+ */
+
+ unsigned int mismatch_offset = 0;
+
+ if (mismatch_offset != 0x0){
+ volatile int pmp_granularity = detect_pmp_granularity();
+ mismatch_offset = mismatch_addr_offset(pmp_granularity);
+ }
+
+ // pmpaddrN holds word addresses, i.e. byte address >> PMP_SHIFT
+ asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+ asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); // NAPOT: covers [0, TEST_MEM_START)
+ reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+ asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+ asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code
+ asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start
+ reg_t cfg0 = PMP_OFF;
+ reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+ // The commented-out check below only holds on Spike
+// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+// exit(cfg0);
+// }
+
+ if (0) { // set the L bit for M-mode code such as trap handling
+#if M_MODE_RWX
+ cfg0 |= PMP_L;
+#else
+ cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+ }
+
+ cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM
+#if 1
+ cfg0 |= ( (1 ? PMP_R : 0)
+ | (1 ? PMP_W : 0)
+ | (0 ? PMP_X : 0)
+ | PMP_TOR | (1 ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // set proc->state.mseccfg for MML/MMWP
+ const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0);
+ if (seccfg_bits) {
+ asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+ }
+
+ // currently a no-op: Spike flushes its TLB when mseccfg is written via set_csr
+ asm volatile ("fence.i \n");
+}
+
+// from the pmp_ok() perspective, W/R/X behave similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_rw_fail = 1;
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // cleared again inside target_foo() if the fetch succeeds
+ target_foo();
+#endif
+}
+
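The constant checked in try_access() falls straight out of target_arr's initializer: read as a little-endian 64-bit word, the bytes {1,2,3,4,5,6,7,8} are 0x0807060504030201, the leading target_arr[0] += 1 contributes the + 1, and the write of delta contributes the rest. A self-contained host-side sketch of the same arithmetic (hypothetical demo code, assuming a little-endian target such as RV64):

#include <assert.h>
#include <string.h>

/* Sketch: why the expected value is 0x0807060504030201 + delta + 1. */
int main(void) {
    unsigned char arr[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    unsigned long v;

    arr[0] += 1;               /* mirrors target_arr[0] += 1 */
    memcpy(&v, arr, sizeof v); /* little-endian: arr[0] is the low byte */

    unsigned long delta = 0x1020304005060708UL;
    v += delta;                /* mirrors *(long *)target_arr += delta */

    assert(v == 0x0807060504030201UL + delta + 1);
    return 0;
}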
+// if MML is set, printf cannot be used from U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * Switch back to M mode by triggering a write access fault on a special address.
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_rw_fail != actual_rw_fail) {
+ ret += 1;
+ }
+
+ if (expected_x_fail != actual_x_fail) {
+ ret += 2;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // still in M mode at this point
+ set_cfg();
+
+ switch_mode(); // if we switch to U mode, execution branches to try_access_umode directly
+
+ try_access();
+
+ checkTestResult();
+ return 0; // not reached: checkTestResult() always exits
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c
new file mode 100644
index 00000000..8a05d92d
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode;
+ * that makes it easier to handle PMP exceptions during the test.
+ *
+ * Remarks:
+ * - RW=01 is not covered; U/M-mode sharing is tested separately.
+ * - RLB is always 0; CSR access control is tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, R and W combinations need to be separated.
+ * RW=01 (share mode) is skipped on the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R, W and X share a single pmpcfg entry for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
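Each generated variant encodes its knobs in the file name, and the expected_* constants follow from them: u1 runs the access in U mode, rw11/x0 set R and W but clear X on the TEST_MEM entry, l1 locks that entry, match1 means mismatch_offset stays 0 so the address range matches, and mmwp/mml select the mseccfg bits. Under MML, a locked entry becomes an M-mode-only rule, which is why this file expects the U-mode read/write to fault (expected_rw_fail = 1) while the otherwise identical mml0 variant expects it to succeed. The sketch below writes that mapping out as data; the struct and field names are illustrative, inferred from the generated outputs rather than taken from the generator.

/* Sketch: file-name knobs for test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c */
struct pmp_test_params {
    int u_mode;              /* u1: run the access from U mode            */
    int pmp_r, pmp_w, pmp_x; /* rw11, x0: TEST_MEM entry permissions      */
    int pmp_l;               /* l1: lock the TEST_MEM entry               */
    int addr_match;          /* match1: mismatch_offset == 0              */
    int mmwp, mml;           /* mmwp0, mml1: mseccfg bits                 */
};

static const struct pmp_test_params this_variant = {
    .u_mode = 1,
    .pmp_r = 1, .pmp_w = 1, .pmp_x = 0,
    .pmp_l = 1,
    .addr_match = 1,
    .mmwp = 0, .mml = 1,
};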
+/*
+ * Overrides handle_trap() from syscalls.c.
+ * Currently it simply skips to the next instruction.
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+ if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+ asm volatile ("nop");
+ actual_x_fail = 1;
+ checkTestResult();
+ } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+ reg_t addr;
+ asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+ if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+ actual_rw_fail = 1;
+ return epc + 4; // skip the faulting instruction (assumes it is not compressed)
+ }
+
+ if (addr == FAKE_ADDRESS) {
+ asm volatile ("nop");
+ asm volatile ("nop");
+ checkTestResult();
+ }
+ }
+
+ printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+ tohost_exit(1337);
+}
+
+
+// switch (mret) to U mode and resume at try_access_umode
+static void switch_to_U() {
+ reg_t tmp;
+ asm volatile (
+ "li %0, %1\n"
+ "\tcsrc mstatus, %0\n"
+ "\tla %0, try_access_umode \n"
+ "\tcsrw mepc, %0\n"
+ "\tli sp, %2\n"
+ "\tmret\n"
+ : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if 1
+ switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+ asm volatile ("nop");
+
+ actual_x_fail = 0;
+}
+
+/*
+ * Avoid accessing actual_x_fail here, since it lies in M-mode memory.
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+ asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+ 1,2,3,4,5,6,7,8,
+};
+
+static int detect_pmp_granularity(){
+ unsigned int granule;
+ unsigned long int temp_reg;
+ unsigned long int all_ones = ~0x0UL;
+
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+ asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+ asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+ int g = 2; // count trailing zeros of the readback: granule = 1 << (2 + T)
+ for(uintptr_t i = 1; i; i<<=1) {
+ if((temp_reg & i) != 0)
+ break;
+ g++;
+ }
+ granule = 1UL << g;
+
+ return granule;
+}
+
+static int mismatch_addr_offset(int granule_size){
+ unsigned int addr_offset = 0;
+
+ if (addr_offset == 0x0){
+ return 0x0;
+ }
+ else {
+ unsigned int mismatch_offset = granule_size; // round up to a power of two >= addr_offset
+ while (mismatch_offset < addr_offset){
+ mismatch_offset = mismatch_offset << 0x1;
+ }
+ return mismatch_offset;
+ }
+}
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+ /*
+ * Set MSECCFG_RLB (mseccfg is CSR 0x747) so locked entries can still be rewritten.
+ */
+ unsigned rlb_value = MSECCFG_RLB;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+ /*
+ * Set pmp0cfg for M mode (M_MEM) and pmp1cfg for the base of the TOR range.
+ * Then use pmp2cfg for TEST_MEM; test code and data share that PMP entry.
+ * Also use pmp3cfg for fixed U-mode memory (U_MEM).
+ *
+ * Here @pmp_addr_offset:int@ creates an address mismatch,
+ * and @create_pmp_cfg:int@ creates a cfg mismatch.
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..ed7ec3fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..a29ba3ee --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..79509326 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..c377e8e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..27367967 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..1b4b7c78 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
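+ *
+ * Encoding refresher for the writes below: a pmpaddrN register holds a
+ * physical address shifted right by PMP_SHIFT (2). A TOR entry i matches
+ * pmpaddr[i-1] << 2 <= addr < pmpaddr[i] << 2, so pmp2cfg covers
+ * [TEST_MEM_START + offset, TEST_MEM_END) and pmp3cfg covers
+ * [TEST_MEM_END, U_MEM_END). A NAPOT entry encodes a 2^k-byte region at
+ * a 2^k-aligned base (k >= 3); an illustrative helper (a sketch, not
+ * part of this file) would be:
+ *
+ *   reg_t napot_addr(reg_t base, int k) {
+ *     return (base >> 2) | ((1UL << (k - 3)) - 1);
+ *   }
+ *
+ * e.g. napot_addr(0, 21) == 0x3ffff == (TEST_MEM_START >> 3) - 1, the
+ * value the M_MODE_RWX variant writes to pmpaddr0 for [0, TEST_MEM_START).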
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..e8b6d8f9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
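+ *
+ * In this variant MML and MMWP stay clear (only RLB is set below), so
+ * the base PMP rules apply: an M-mode access that matches no entry
+ * succeeds, an S/U-mode access that matches no entry is denied, and
+ * entries with L=0 never constrain M mode at all. Only the TEST_MEM
+ * and U_MEM entries therefore decide this test's outcome.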
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..80dadf3d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
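+ *
+ * This variant turns on MSECCFG_MML below. Under Smepmp's machine-mode
+ * lockdown, pmpcfg.L selects which privilege a rule applies to: L=1
+ * rules bind M mode (and deny S/U), L=0 rules bind S/U only and deny
+ * M-mode access, and M-mode execution is only permitted from regions
+ * covered by an M-mode rule. That is why the if (1) branch below sets
+ * PMP_L on the pmp5/pmp6 code and data rules, and why RLB is raised
+ * first: with MML set, new executable M-mode rules are otherwise rejected.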
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
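+ *
+ * Control-flow note for this u1 variant: switch_to_U() above never
+ * returns (mret lands on try_access_umode with MPP cleared to U mode),
+ * so the try_access()/checkTestResult() calls in main() are not reached
+ * directly; the test finishes inside handle_trap() via this store.
+ * Note also that switch_to_U() loads the MPP mask with "li %0, %1" but
+ * clears it with "csrc mstatus, t0", i.e. it relies on the compiler
+ * allocating t0 for %0.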
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..5874dd65 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
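+ *
+ * This variant turns on MSECCFG_MMWP below: with the machine-mode
+ * whitelist policy, an M-mode access that matches no PMP entry is
+ * denied rather than allowed. That is why M_MODE_RWX == 1 here adds
+ * the single NAPOT R/W/X rule over [0, TEST_MEM_START) in pmp0cfg, so
+ * the M-mode image (M_MEM) keeps matching a rule after MMWP is set.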
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..d367d8b6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
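+ *
+ * Data note for the try_access() checks above: target_arr starts as
+ * bytes 1..8, which a little-endian RV64 load reads as
+ * 0x0807060504030201; target_arr[0] += 1 adds 1 and the 64-bit
+ * read-modify-write adds delta, hence the expected value
+ * 0x0807060504030201 + delta + 1. If either access faults, handle_trap()
+ * records actual_rw_fail and resumes at epc + 4, which assumes the
+ * faulting load/store is a 4-byte (uncompressed) instruction.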
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..cfa481f9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
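+ *
+ * In this match0 variant the cfg-mismatch knob is active: the #if 0
+ * block below never grants pmp2cfg its R/W/X bits, so TEST_MEM matches
+ * no enabled entry. For the U-mode accesses of this test that alone
+ * denies both the data access and the fetch, which is why
+ * expected_rw_fail and expected_x_fail are both 1 above.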
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..214cd9c2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
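+ *
+ * Granularity handling for the mismatch offset used below: writing
+ * all-ones to pmpaddr0 and reading it back reveals the PMP grain, since
+ * address bits below the grain read back as zero; with G trailing zeros
+ * the granularity is 2^(G+2) bytes (the +2 is the PMP_SHIFT word
+ * scaling). mismatch_addr_offset() then doubles that grain until it
+ * reaches addr_offset (256), so the offset added to pmpaddr1 is a whole
+ * number of granules and actually moves the TOR base.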
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+   */
+  volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+  *p = 1;
+}
+
+static void checkTestResult() {
+  int ret = 0;
+  if (expected_rw_fail != actual_rw_fail) {
+    ret += 1;
+  }
+
+  if (expected_x_fail != actual_x_fail) {
+    ret += 2;
+  }
+
+  exit(ret);
+}
+
+int main() {
+  // assert in M mode
+  set_cfg();
+
+  switch_mode();  // when switching to U mode, this branches straight to try_access_umode
+
+  try_access();
+
+  checkTestResult();
+  return 0;  // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c
new file mode 100644
index 00000000..bace2cf2
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c
@@ -0,0 +1,345 @@
+
+/*
+ * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing in M mode.
+ * That makes it easier to deal with PMP exceptions during the test.
+ *
+ * Remarks:
+ * - RW=01 is not covered here. U/M mode sharing is tested separately.
+ * - RLB is always 0. CSR access control is tested separately.
+ *
+ * @changed 2020-Mar-2 soberl
+ * For RWXL + MML, the R and W combinations need to be separated.
+ * Skip RW=01 (share mode) at the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = 1;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = 1;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..070f6c66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
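+   *
+   * Note: this variant raises both MML and MMWP below. Under MML,
+   * non-locked (L=0) entries grant permissions to U mode only, so the
+   * entries covering the M-mode code and data must be locked (PMP_L)
+   * before mseccfg is written; otherwise the trap handler itself would
+   * start faulting.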
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..4d93c217 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
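+   *
+   * Note: in this match1 variant mismatch_offset stays 0, so pmpaddr1
+   * sits exactly at TEST_MEM_START, the TOR entry for TEST_MEM matches
+   * every test access, and (with neither MML nor MMWP set) both
+   * expected_*_fail flags above are 0.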
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..4499f000 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..96794703 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..c30abb20 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+   */
+  volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+  *p = 1;
+}
+
+static void checkTestResult() {
+  int ret = 0;
+  if (expected_rw_fail != actual_rw_fail) {
+    ret += 1;
+  }
+
+  if (expected_x_fail != actual_x_fail) {
+    ret += 2;
+  }
+
+  exit(ret);
+}
+
+int main() {
+  // assert in M mode
+  set_cfg();
+
+  switch_mode();  // when switching to U mode, this branches straight to try_access_umode
+
+  try_access();
+
+  checkTestResult();
+  return 0;  // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c
new file mode 100644
index 00000000..4ef61a78
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c
@@ -0,0 +1,296 @@
+
+/*
+ * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel.
+ *
+ * This test program exercises pmp_ok() in share mode (RW=01).
+ * Building on the other mseccfg sticky-bit test cases, it expects the following:
+ * - RW = 01 (for RW != 01 there are fewer combinations that can fail).
+ * - MML set.
+ * - Region matched.
+ *
+ * Remarks:
+ * -
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW (1 - 0)
+#define TEST_FETCH (0)
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_r_fail = 0;
+static unsigned actual_r_fail = 0;
+
+static const unsigned long expected_w_fail = 0;
+static unsigned actual_w_fail = 0;
+
+static const unsigned long expected_x_fail = 0;
+static unsigned actual_x_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
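+ * For the share-mode (RW=01) tests, the handler below also distinguishes
+ * load faults from store faults (actual_r_fail vs actual_w_fail).
+ * Comparing this umode0 file with its umode1 sibling below (which, per
+ * its name, probes from U mode) pins down the MML shared-data encoding
+ * L=0/RW=01/X=0: read/write for M mode, read-only for U mode; see the
+ * expected_*_fail values in each file.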
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? 
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+ cfg0 |= (cfg1 << 32);
+#else
+ asm volatile ("csrw pmpcfg1, %0 \n"
+ :
+ : "r"(cfg1)
+ : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+ // set proc->state.mseccfg MML/MMWP bits
+ const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP;
+ asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+
+ // once MML is set, RW=01 becomes a legal (shared-region) encoding
+ cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM
+ cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM
+ | PMP_W
+ | (0 ? PMP_X : 0)
+ | (0 ? PMP_L : 0)
+ | PMP_TOR) << 16;
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // currently redundant: the simulator flushes the TLB when set_csr writes mseccfg
+ asm volatile ("fence.i \n");
+}
+
+// from pmp_ok()'s point of view, W/R/X behave similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ if (actual_r_fail == 0 && actual_w_fail == 0) {
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_r_fail = 1;
+ actual_w_fail = 1;
+ }
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // reset inside target_foo()
+ target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * switch back to M mode by triggering a write access fault on a special address.
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_r_fail != actual_r_fail) {
+ ret += 1;
+ }
+ if (expected_w_fail != actual_w_fail) {
+ ret += 2;
+ }
+ if (expected_x_fail != actual_x_fail) {
+ ret += 4;
+ }
+
+ exit(ret);
+}
+
+int main() {
+ // starts in M mode
+ set_cfg();
+
+ try_access();
+#if 0
+ switch_mode_access(); // access in U mode and report the final result
+#else
+ checkTestResult();
+#endif
+ return 0; // expected exit code 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c
new file mode 100644
index 00000000..18c29089
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c
@@ -0,0 +1,296 @@
+
+/*
+ * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel.
+ *
+ * This test program exercises pmp_ok() for shared-mode regions (RW=01).
+ * Based on the other test cases for the mseccfg sticky bits, this test
+ * expects the following:
+ * - RW = 01. For RW != 01, fewer combinations are needed to show failures.
+ * - MML set
+ * - Region matched.
+ *
+ * Remarks:
+ * -
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW (1 - 0)
+#define TEST_FETCH (0)
+/*
+ * Whether R/W/X share a single cfg entry for M mode.
+ * When @set_sec_mml@ is set, it must be 0; otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
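+// no-op stub: printf expands to nothing when PRINTF_SUPPORTED is unset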
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
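+ * (the pmpaddr CSRs hold a physical address right-shifted by 2, hence the
+ * >> 2 on every address written below)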
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c new file mode 100644 index 00000000..764ae510 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..67fcd493 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
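+ * FAKE_ADDRESS matches no PMP entry, so the U-mode store below must fault;
+ * handle_trap() recognizes the address and reports the result.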
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c new file mode 100644 index 00000000..9d5a1bb8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
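+ * For PMP access faults, mtval supplies the faulting address and the
+ * handler steps over the faulting instruction, assumed to be 4 bytes wide.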
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..347cb0ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c new file mode 100644 index 00000000..0fd54c69 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..715c485d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c new file mode 100644 index 00000000..b2c8bcba --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? 
PMP_L : 0)
+ | PMP_TOR) << 16;
+ asm volatile ("csrw pmpcfg0, %0 \n"
+ :
+ : "r"(cfg0)
+ : "memory");
+
+ // currently a no-op, since the TLB is flushed when set_csr() touches mseccfg
+ asm volatile ("fence.i \n");
+}
+
+// from the pmp_ok() side, W/R/X are handled similarly
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+ target_arr[0] += 1;
+ const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+ *(long *)target_arr += delta;
+
+ if (actual_r_fail == 0 && actual_w_fail == 0) {
+ if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+ actual_r_fail = 1;
+ actual_w_fail = 1;
+ }
+ }
+#endif
+
+#if TEST_FETCH
+ actual_x_fail = 1; // reset inside target_foo()
+ target_foo();
+#endif
+}
+
+// when MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+ target_arr[0] += 1;
+// const unsigned long delta = 0x1020304005060708UL;
+// *(long *)target_arr += delta;
+
+// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+// actual_rw_fail = 1;
+// }
+#endif
+
+#if TEST_FETCH
+ target_foo_umode();
+#endif
+
+ /*
+ * switch to M mode by invoking a write access fault on a special address.
+ */
+ volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+ *p = 1;
+}
+
+static void checkTestResult() {
+ int ret = 0;
+ if (expected_r_fail != actual_r_fail) {
+ ret += 1;
+ }
+ if (expected_w_fail != actual_w_fail) {
+ ret += 2;
+ }
+ if (expected_x_fail != actual_x_fail) {
+ ret += 4;
+ }
+
+
+ exit(ret);
+}
+
+int main() {
+ // assert in M mode
+ set_cfg();
+
+ try_access();
+#if 0
+ switch_mode_access(); // access in umode and report final result
+#else
+ checkTestResult();
+#endif
+ return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c
new file mode 100644
index 00000000..4d15a8b6
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c
@@ -0,0 +1,296 @@
+
+/*
+ * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c
+ * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel.
+ *
+ * This test program tests pmp_ok() in shared mode (RW=01).
+ * Based on other test cases for the mseccfg sticky bits, this test expects the following:
+ * - RW = 01. For RW != 01, fewer combinations are needed to show failure.
+ * - MML set
+ * - Region matched.
+ *
+ * Remarks:
+ * -
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW (1 - 0)
+#define TEST_FETCH (0)
+/*
+ * Whether R/W/X share a single cfg for M mode.
+ * When @set_sec_mml@ is set, it must be 0, otherwise an unexpected exception is raised.
+ */
+#define M_MODE_RWX 0
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
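+/*
+ * Editor's note (sketch, not upstream text): with PRINTF_SUPPORTED unset,
+ * printf() compiles to nothing, so the only observable result is the code
+ * passed to exit() in checkTestResult():
+ *   bit 0 = R expectation mismatch, bit 1 = W, bit 2 = X;
+ *   0 means every expected_*_fail flag matched its actual_*_fail value.
+ */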
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
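+ *
+ * Editor's note: pmpaddrN holds a byte address shifted right by PMP_SHIFT
+ * (2), and a TOR entry i covers [pmpaddr(i-1) << 2, pmpaddr(i) << 2).
+ * Worked example for TEST_MEM below: pmpaddr1 = 0x200000 >> 2 = 0x80000
+ * and pmpaddr2 = 0x240000 >> 2 = 0x90000, so entry 2 spans
+ * 0x200000..0x23ffff.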
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
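+ *
+ * Editor's note: FAKE_ADDRESS (0x10000000) matches no PMP entry, and a
+ * U-mode access that matches no entry is always denied, so the store below
+ * reliably traps; handle_trap() spots mtval == FAKE_ADDRESS and finishes
+ * the test from M mode via checkTestResult().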
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c new file mode 100644 index 00000000..8c12a5a2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
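+ * (Editor's note: on access faults, mtval holds the faulting data address;
+ * the handler below uses it to tell an expected TEST_MEM fault, which is
+ * recorded and skipped over, from the deliberate FAKE_ADDRESS store that
+ * is used to get back into M mode.)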
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..dd0337f5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
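+/*
+ * Editor's note (sketch, not upstream text): each pmpNcfg is one byte of a
+ * packed pmpcfg CSR, so the shifts in set_cfg() pick byte lanes, e.g.
+ * << 16 for pmp2cfg (TEST_MEM) and << 24 for pmp3cfg (U_MEM) in cfg0.
+ * On RV64, pmpcfg0 holds eight entry bytes, so cfg1 is folded into its
+ * upper half; on RV32, entries 4..7 live in pmpcfg1, written separately.
+ */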
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
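+ *
+ * Editor's note: mseccfg is CSR 0x747, written here with csrs since the
+ * toolchain may not know it by name; RLB (rule-locking bypass) is set
+ * first so that entries whose L bit is set below can still be rewritten
+ * while the test reconfigures them.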
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
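+ *
+ * Editor's note: the read-back verification that try_access() performs is
+ * left commented out here, presumably because the actual_*_fail flags live
+ * in M-mode-only data; the U-mode code touches only target_arr and lets
+ * the trap handler do the bookkeeping.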
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c new file mode 100644 index 00000000..58d495ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..be5e02e0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
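+ *
+ * Editor's note (per the Smepmp proposal, as we read it): once MML is set,
+ * the otherwise-reserved W=1/R=0 encodings denote shared regions, e.g.
+ * L=0,X=1,W=1,R=0 = data readable/writable by both M and U, while
+ * L=1,X=1,W=1,R=0 = code execute-only for U and read/execute for M, which
+ * is what this cfgl1 variant's expected_*_fail values above encode.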
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c new file mode 100644 index 00000000..02a96b0b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..62457c95 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
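+ *
+ * Editor's note: PMP entries are matched in ascending order and the
+ * lowest-numbered matching entry wins, so entry 2 (TEST_MEM) takes
+ * precedence over entry 3 (U_MEM) for addresses inside the test range.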
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c new file mode 100644 index 00000000..5af37e1e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
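+ * (Editor's note: any trap that does not fit the two patterns handled
+ * below is treated as a harness failure and aborts through
+ * tohost_exit(1337), a magic code kept distinct from the small bitmask
+ * that checkTestResult() reports.)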
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..a6dacffb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
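+ *
+ * Resulting TOR layout (pmpaddr CSRs hold physical addresses shifted
+ * right by PMP_SHIFT, i.e. >> 2):
+ *   pmpaddr1 = TEST_MEM_START >> 2 : base of the test range
+ *   pmpaddr2 = TEST_MEM_END >> 2   : pmp2cfg covers [TEST_MEM_START, TEST_MEM_END)
+ *   pmpaddr3 = U_MEM_END >> 2      : pmp3cfg covers [TEST_MEM_END, U_MEM_END)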
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
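+ * (FAKE_ADDRESS matches no configured PMP region, so the U-mode store
+ * below is guaranteed to fault; handle_trap recognises
+ * mtval == FAKE_ADDRESS and reports the final result.)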
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..58ce4407 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
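+ * checkTestResult later encodes any mismatch between the expected_*_fail
+ * constants above and the observed actual_*_fail flags as bits 0 (read),
+ * 1 (write) and 2 (fetch) of the exit code; 0 means the test passed.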
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..29b59eaa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
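+/* without printf support the calls compile away to nothing */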
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
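+ * (The pointer is volatile so the compiler cannot optimise the faulting
+ * store away.)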
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c new file mode 100644 index 00000000..cc2e9709 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..94179906 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
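+ * (switch_mode_access parks the U-mode stack pointer at U_MEM_END, so
+ * U-mode stack traffic stays inside the pmp3cfg range.)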
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..5b2fd2d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
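+ * Any trap matching neither pattern in the handler below falls through
+ * to tohost_exit(1337), flagging it as unexpected.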
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..e3849239 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
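+ * With M_MODE_RWX == 0, pmp0cfg stays PMP_OFF and M-mode code and data
+ * are instead covered by the TOR entries pmp5cfg (execute) and pmp6cfg
+ * (read/write) assembled into cfg1 below.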
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */
+  volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+  *p = 1;
+}
+
+static void checkTestResult() {
+  int ret = 0;
+  if (expected_r_fail != actual_r_fail) {
+    ret += 1;
+  }
+  if (expected_w_fail != actual_w_fail) {
+    ret += 2;
+  }
+  if (expected_x_fail != actual_x_fail) {
+    ret += 4;
+  }
+
+
+  exit(ret);
+}
+
+int main() {
+  // assert in M mode
+  set_cfg();
+
+  try_access();
+#if 1
+  switch_mode_access();   // access in U mode and report the final result
+#else
+  checkTestResult();
+#endif
+  return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel
new file mode 100644
index 00000000..c840d138
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel
@@ -0,0 +1,313 @@
+
+/*
+ * @tag@
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies PMP CSR access behaviour once mseccfg is
+ * introduced. It is expected to execute from M mode.
+ *
+ * Remarks:
+ * - CSR protection for non-M mode access is assumed and not covered.
+ * - Access to an invalid CSR index, such as pmpcfg1 on rv64, is not covered.
+ * - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @@set_sec_mml@@ is set, it must be 0, otherwise an unexpected
+ * exception is raised.
+ */
+#define M_MODE_RWX @m_mode_rwx:int@
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = @expected_seccfg_fail:int@;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = @expected_pmpaddr_fail:int@;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = @expected_pmpcfg_fail:int@;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
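+ * In this CSR-access test no trap is expected at all, so handle_trap
+ * aborts immediately via tohost_exit.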
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+  tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+  asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+  asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ * - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *   set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @@set_rlb_at_start:int@@
+  /*
+   * set MSECCFG_RLB so that no entry is locked at the start
+   */
+  asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+  asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+  /*
+   * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+   * Then use pmp2cfg for TEST_MEM; test code and data share one PMP entry.
+   * Also use pmp3cfg for fixed U mode (U_MEM).
+   */
+  asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+  asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+  asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+  asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+  reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+  asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+  asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+  asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+  reg_t cfg0 = PMP_OFF;
+  reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+  if (@pre_sec_mml:int@) { // the L bit must be set for M-mode code access
+#if M_MODE_RWX
+    cfg0 |= PMP_L;
+#else
+    cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+  }
+
+  reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((@lock_once:int@ || @pmp_lock:int@) ? PMP_L : 0);
+  cfg0 |= sub_cfg << 24;  // for U_MEM
+  cfg0 |= sub_cfg << 16;  // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+  cfg0 |= (cfg1 << 32);
+#else
+  asm volatile ("csrw pmpcfg1, %0 \n"
+              :
+              : "r"(cfg1)
+              : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+  asm volatile ("csrw pmpcfg0, %0 \n"
+              :
+              : "r"(cfg0)
+              : "memory");
+
+  if (@lock_once:int@ != @pmp_lock:int@) {
+    reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid the (default) int type
+    if (@pmp_lock:int@) {
+      asm volatile ("csrs pmpcfg0, %0 \n"
+                  :
+                  : "r"(lock_bits)
+                  : "memory");
+    } else {
+      asm volatile ("csrc pmpcfg0, %0 \n"
+                  :
+                  : "r"(lock_bits)
+                  : "memory");
+    }
+  }
+
+  // set proc->state.mseccfg
+  const unsigned seccfg_bits = (@lock_bypass:int@ ? MSECCFG_RLB : 0)
+        | (@pre_sec_mml:int@ ? MSECCFG_MML : 0)
+        | (@pre_sec_mmwp:int@ ? MSECCFG_MMWP : 0);
+  asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+  asm volatile ("nop");
+  /*
+   * PMP and seccfg accesses must be kept separate, since the recorded
+   * pmplock status may be updated again when pmpcfg is accessed.
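+   * For the pmpaddr probe that follows: entry 0 is exercised as NAPOT,
+   * so the test writes ((rval + 1) << 1) - 1, widening the NAPOT mask by
+   * one bit; higher entries are exercised as TOR, where (rval << 1) plus
+   * a small offset is simply another valid top-of-range address.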
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg access since the pmplock_recorded status may be
+     * updated again when accessing pmpcfg.
+     */
+    reg_t wval = 0, rval;
+#if @group_pmp:int@
+    asm volatile ("csrr %0, pmpaddr@addr_idx:int@ \n"
+                  : "=r"(rval));
+    // give a valid value for both NAPOT and TOR
+    if (@addr_idx:int@ == 0) {
+        wval = ((rval + 1) << 1) - 1;  // NAPOT mask
+    } else {
+        wval = (rval << 1) + @addr_offset:int@;
+    }
+    asm volatile ("csrw pmpaddr@addr_idx:int@, %1 \n"
+                  "\tcsrr %0, pmpaddr@addr_idx:int@ \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpaddr@addr_idx:int@ expects %lx vs %lx\n", wval, rval);
+        actual_pmpaddr_fail = 1;
+    }
+
+    // Update cfg0 to avoid changing an idx other than @cfg_sub_idx:int@
+    asm volatile ("csrr %0, pmpcfg@cfg_idx:int@ \n"
+                  : "=r"(cfg0)
+                  :
+                  : "memory");
+
+    // reuse lock_once here since it's for RLB and independent of pmp_lock
+    wval = cfg0 ^ ((reg_t)(@revert_rwx:int@ | (@lock_once:int@ ? PMP_L : 0)) << (@cfg_sub_idx:int@ * 8));
+    asm volatile ("csrw pmpcfg@cfg_idx:int@, %1 \n"
+                  "\tcsrr %0, pmpcfg@cfg_idx:int@ \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    if (wval != rval) {
+        printf("pmpcfg expects %lx vs %lx\n", wval, rval);
+        actual_pmpcfg_fail = 1;
+    }
+#else
+    /*
+     * need to set PMP_L for cfg0, otherwise the next PC fetch will be illegal.
+     * This is a little coverage hole for non-PMP_L + mml, which should be
+     * a restricted use case and can be accepted anyway.
+     */
+    if (@sec_mml:int@) {
+#if M_MODE_RWX
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L));
+#else
+#if __riscv_xlen == 64
+        asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));
+#else
+        asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16)));
+#endif // __riscv_xlen == 64
+#endif // M_MODE_RWX
+    }
+
+    wval = (@sec_rlb:int@ ? MSECCFG_RLB : 0)
+          | (@sec_mml:int@ ? MSECCFG_MML : 0)
+          | (@sec_mmwp:int@ ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %1 \n"
+                  "\tcsrr %0, 0x747 \n"
+                  : "=r"(rval)
+                  : "r"(wval)
+                  : "memory");
+    reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+    /*
+     * pre_sec_mml means cfg0 locked
+     * pmp_lock means cfg2/3 locked
+     * sec_mml is the test coverage hole just mentioned
+     */
+    if ((@pre_sec_mml:int@ || @pmp_lock:int@ || @sec_mml:int@)
+            && @lock_bypass:int@ == 0) {
+        expected_val &= ~MSECCFG_RLB;
+    }
+    if (@pre_sec_mml:int@) {
+        expected_val |= MSECCFG_MML;
+    }
+    if (@pre_sec_mmwp:int@) {
+        expected_val |= MSECCFG_MMWP;
+    }
+
+    if (expected_val != rval) actual_seccfg_fail = 1;
+#endif
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_seccfg_fail != actual_seccfg_fail) {
+        ret += 1;
+    }
+
+    if (expected_pmpaddr_fail != actual_pmpaddr_fail) {
+        ret += 2;
+    }
+
+    if (expected_pmpcfg_fail != actual_pmpcfg_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    checkTestResult();
+    return 0; // assert 0
+}
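[Editor's note: checkTestResult() above folds the three comparisons into a bitmask exit code. A minimal host-side decoding sketch (a hypothetical helper, not part of the sources):

    #include <stdio.h>

    /* Decode the exit code of a generated test_pmp_csr test:
     * bit 0 = seccfg mismatch, bit 1 = pmpaddr mismatch,
     * bit 2 = pmpcfg mismatch; 0 means pass. */
    static void decode_exit_code(int code) {
        if (code == 0) { printf("pass\n"); return; }
        if (code & 1)  printf("mseccfg read-back differs from expectation\n");
        if (code & 2)  printf("pmpaddr read-back differs from expectation\n");
        if (code & 4)  printf("pmpcfg read-back differs from expectation\n");
    }
]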
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h
new file mode 100644
index 00000000..4765af72
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h
@@ -0,0 +1,1170 @@
+/*
+ * File automatically generated by
+ * gengen 1.4.2 by Lorenzo Bettini
+ * http://www.gnu.org/software/gengen
+ */
+
+#ifndef PMP_CSR_1_GEN_CLASS_H
+#define PMP_CSR_1_GEN_CLASS_H
+
+#include <string>
+#include <iostream>
+
+using std::string;
+using std::ostream;
+
+class pmp_csr_1_gen_class
+{
+ protected:
+  int addr_idx;
+  int addr_offset;
+  int cfg_idx;
+  int cfg_sub_idx;
+  int expected_pmpaddr_fail;
+  int expected_pmpcfg_fail;
+  int expected_seccfg_fail;
+  int group_pmp;
+  int lock_bypass;
+  int lock_once;
+  int m_mode_rwx;
+  int pmp_lock;
+  int pre_sec_mml;
+  int pre_sec_mmwp;
+  int revert_rwx;
+  int sec_mml;
+  int sec_mmwp;
+  int sec_rlb;
+  string tag;
+
+ public:
+  pmp_csr_1_gen_class() :
+    addr_idx (0), addr_offset (0), cfg_idx (0), cfg_sub_idx (0), expected_pmpaddr_fail (0), expected_pmpcfg_fail (0), expected_seccfg_fail (0), group_pmp (0), lock_bypass (0), lock_once (0), m_mode_rwx (0), pmp_lock (0), pre_sec_mml (0), pre_sec_mmwp (0), revert_rwx (0), sec_mml (0), sec_mmwp (0), sec_rlb (0)
+  {
+  }
+
+  pmp_csr_1_gen_class(int _addr_idx, int _addr_offset, int _cfg_idx, int _cfg_sub_idx, int _expected_pmpaddr_fail, int _expected_pmpcfg_fail, int _expected_seccfg_fail, int _group_pmp, int _lock_bypass, int _lock_once, int _m_mode_rwx, int _pmp_lock, int _pre_sec_mml, int _pre_sec_mmwp, int _revert_rwx, int _sec_mml, int _sec_mmwp, int _sec_rlb, const string &_tag) :
+    addr_idx (_addr_idx), addr_offset (_addr_offset), cfg_idx (_cfg_idx), cfg_sub_idx (_cfg_sub_idx), expected_pmpaddr_fail (_expected_pmpaddr_fail), expected_pmpcfg_fail (_expected_pmpcfg_fail), expected_seccfg_fail (_expected_seccfg_fail), group_pmp (_group_pmp), lock_bypass (_lock_bypass), lock_once (_lock_once), m_mode_rwx (_m_mode_rwx), pmp_lock (_pmp_lock), pre_sec_mml (_pre_sec_mml), pre_sec_mmwp (_pre_sec_mmwp), revert_rwx (_revert_rwx), sec_mml (_sec_mml), sec_mmwp (_sec_mmwp), sec_rlb (_sec_rlb), tag (_tag)
+  {
+  }
+
+  void set_addr_idx(int _addr_idx)
+  {
+    addr_idx = _addr_idx;
+  }
+
+  void set_addr_offset(int _addr_offset)
+  {
+    addr_offset = _addr_offset;
+  }
+
+  void set_cfg_idx(int _cfg_idx)
+  {
+    cfg_idx = _cfg_idx;
+  }
+
+  void set_cfg_sub_idx(int _cfg_sub_idx)
+  {
+    cfg_sub_idx = _cfg_sub_idx;
+  }
+
+  void
set_expected_pmpaddr_fail(int _expected_pmpaddr_fail) + { + expected_pmpaddr_fail = _expected_pmpaddr_fail; + } + + void set_expected_pmpcfg_fail(int _expected_pmpcfg_fail) + { + expected_pmpcfg_fail = _expected_pmpcfg_fail; + } + + void set_expected_seccfg_fail(int _expected_seccfg_fail) + { + expected_seccfg_fail = _expected_seccfg_fail; + } + + void set_group_pmp(int _group_pmp) + { + group_pmp = _group_pmp; + } + + void set_lock_bypass(int _lock_bypass) + { + lock_bypass = _lock_bypass; + } + + void set_lock_once(int _lock_once) + { + lock_once = _lock_once; + } + + void set_m_mode_rwx(int _m_mode_rwx) + { + m_mode_rwx = _m_mode_rwx; + } + + void set_pmp_lock(int _pmp_lock) + { + pmp_lock = _pmp_lock; + } + + void set_pre_sec_mml(int _pre_sec_mml) + { + pre_sec_mml = _pre_sec_mml; + } + + void set_pre_sec_mmwp(int _pre_sec_mmwp) + { + pre_sec_mmwp = _pre_sec_mmwp; + } + + void set_revert_rwx(int _revert_rwx) + { + revert_rwx = _revert_rwx; + } + + void set_sec_mml(int _sec_mml) + { + sec_mml = _sec_mml; + } + + void set_sec_mmwp(int _sec_mmwp) + { + sec_mmwp = _sec_mmwp; + } + + void set_sec_rlb(int _sec_rlb) + { + sec_rlb = _sec_rlb; + } + + void set_tag(const string &_tag) + { + tag = _tag; + } + + void generate_pmp_csr_1(ostream &stream, unsigned int indent = 0) + { + string indent_str (indent, ' '); + indent = 0; + + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << tag; + stream << "\n"; + stream << indent_str; + stream << " * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * This test program is verify the pmp CSR access when seccfg introduced."; + stream << "\n"; + stream << indent_str; + stream << " * It's expected to executed from M mode."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Remarks:"; + stream << "\n"; + stream << indent_str; + stream << " * - CSR protection for non-M mode access is assumed and not coverred."; + stream << "\n"; + stream << indent_str; + stream << " * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred."; + stream << "\n"; + stream << indent_str; + stream << " * - Executed on RV64 only."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Macros from encoding.h"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define MSTATUS_MPP 0x00001800"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_R 0x01"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_W 0x02"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_X 0x04"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_A 0x18"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_L 0x80"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_SHIFT 2"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if ((PMP_R | PMP_W | PMP_X) != 0x7)"; + stream << "\n"; + stream << indent_str; + stream << "#error unexpected"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << 
"\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_OFF 0x0"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_TOR 0x08"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NA4 0x10"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NAPOT 0x18"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MML 0x1"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MMWP 0x2"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_RLB 0x4"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_RW 1"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_FETCH 1"; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Whether rwx share single cfg for M mode"; + stream << "\n"; + stream << indent_str; + stream << " * When "; + stream << "@"; + stream << "set_sec_mml"; + stream << "@"; + stream << " set, it must be 0, otherwise unexpected exception"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define M_MODE_RWX "; + stream << m_mode_rwx; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_LOAD_ACCESS 0x5"; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_STORE_ACCESS 0x7"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long reg_t;"; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long uintptr_t;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * functions from syscalls.c"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#if PRINTF_SUPPORTED"; + stream << "\n"; + stream << indent_str; + stream << "int printf(const char* fmt, ...);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#define printf(...)"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "void __attribute__((noreturn)) tohost_exit(uintptr_t code);"; + stream << "\n"; + stream << indent_str; + stream << "void exit(int code);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * local status"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_START 0x200000"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_END 0x240000"; + stream << "\n"; + stream << indent_str; + stream << "#define U_MEM_END (TEST_MEM_END + 0x10000)"; + stream << "\n"; + stream << indent_str; + stream << "#define FAKE_ADDRESS 0x10000000"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_seccfg_fail = "; + stream << expected_seccfg_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_seccfg_fail = 0;"; + stream << "\n"; + 
stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_pmpaddr_fail = "; + stream << expected_pmpaddr_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_pmpaddr_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_pmpcfg_fail = "; + stream << expected_pmpcfg_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_pmpcfg_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult(void);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * override syscalls.c."; + stream << "\n"; + stream << indent_str; + stream << " * currently simply skip to nexp instruction"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])"; + stream << "\n"; + stream << indent_str; + stream << "{ "; + stream << "\n"; + stream << indent_str; + stream << " tohost_exit(1337);"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "void target_foo() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_test_arr\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "volatile unsigned char target_arr[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_umode\"), noinline))"; + stream << "\n"; + stream << indent_str; + stream << "void target_foo_U() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_umode\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "volatile unsigned char target_arr_U[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * On processor_t::reset():"; + stream << "\n"; + stream << indent_str; + stream << " * - set_csr(CSR_PMPADDR0, ~reg_t(0));"; + stream << "\n"; + stream << indent_str; + stream << " * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "static void set_cfg() {"; + stream << "\n"; + stream << indent_str; + stream << "#if 1 // "; + stream << "@"; + stream 
<< "set_rlb_at_start:int"; + stream << "@"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * set MSECCFG_RLB to avoid locked at start"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(MSECCFG_RLB));"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "//------------------------Set current status before the test target (CSR access)"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR."; + stream << "\n"; + stream << indent_str; + stream << " * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance."; + stream << "\n"; + stream << indent_str; + stream << " * Also use pmp3cfg for fixed U mode (U_MEM)."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr3, %0 \\n\" :: \"r\"(U_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr2, %0 \\n\" :: \"r\"(TEST_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr1, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"((TEST_MEM_START >> 3) - 1) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr6, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\"); // for data"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr5, %0 \\n\" :: \"r\"(0x80010000 >> 2) : \"memory\"); // for code"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr4, %0 \\n\" :: \"r\"(0x80000000 >> 2) : \"memory\"); // addr start"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = PMP_OFF;"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pre_sec_mml; + stream << ") { // need to set L bit for M mode code access"; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= PMP_L;"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " cfg1 |= ((PMP_L << 8) | (PMP_L << 16));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + 
indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | (("; + stream << lock_once; + stream << " || "; + stream << pmp_lock; + stream << ") ? PMP_L : 0);"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= sub_cfg << 24; // for U_MEM"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= sub_cfg << 16; // for TEST_MEM"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (cfg1 << 32);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg1, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg1)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << lock_once; + stream << " != "; + stream << pmp_lock; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type"; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pmp_lock; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(lock_bits)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " } else {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrc pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(lock_bits)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // set proc->state.mseccfg"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned seccfg_bits = ("; + stream << lock_bypass; + stream << " ? MSECCFG_RLB : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pre_sec_mml; + stream << " ? MSECCFG_MML : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pre_sec_mmwp; + stream << " ? 
MSECCFG_MMWP : 0);"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw 0x747, %0 \\n\"::\"r\"(seccfg_bits));"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "//------------------------Test target"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Need to separate pmp and seccfg access since pmplock_recorded status may be "; + stream << "\n"; + stream << indent_str; + stream << " * updated again when accessing pmpcfg."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " reg_t wval = 0, rval;"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << group_pmp; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, pmpaddr"; + stream << addr_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval));"; + stream << "\n"; + stream << indent_str; + stream << " // give a valid value for both NAPOT and TOR"; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << addr_idx; + stream << " == 0) {"; + stream << "\n"; + stream << indent_str; + stream << " wval = ((rval + 1) << 1) - 1; // NAPOT mask"; + stream << "\n"; + stream << indent_str; + stream << " } else {"; + stream << "\n"; + stream << indent_str; + stream << " wval = (rval << 1) + "; + stream << addr_offset; + stream << "; "; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr"; + stream << addr_idx; + stream << ", %1 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrr %0, pmpaddr"; + stream << addr_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(wval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " if (wval != rval) {"; + stream << "\n"; + stream << indent_str; + stream << " printf(\"pmpaddr"; + stream << addr_idx; + stream << " expects %lx vs %lx\\n\", wval, rval);"; + stream << "\n"; + stream << indent_str; + stream << " actual_pmpaddr_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // Update cfg0 to avoid changing idx other than "; + stream << cfg_sub_idx; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, pmpcfg"; + stream << cfg_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // reuse lock_once here since it's for RLB and independent with pmp_lock"; + stream << "\n"; + stream << indent_str; + stream << " wval = cfg0 ^ ((reg_t)("; + stream << revert_rwx; + stream << " | ("; + stream << lock_once; + stream << " ? 
PMP_L : 0)) << ("; + stream << cfg_sub_idx; + stream << " * 8));"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg"; + stream << cfg_idx; + stream << ", %1 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrr %0, pmpcfg"; + stream << cfg_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(wval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " if (wval != rval) {"; + stream << "\n"; + stream << indent_str; + stream << " printf(\"pmpcfg expects %lx vs %lx\\n\", wval, rval);"; + stream << "\n"; + stream << indent_str; + stream << " actual_pmpcfg_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * need to set PMP_L for cfg0 otherwise next PC will illegal"; + stream << "\n"; + stream << indent_str; + stream << " * This is a little coverage hole for non-PMP_L + mml, which should be"; + stream << "\n"; + stream << indent_str; + stream << " * a restricted use case and can be accepted anyway."; + stream << "\n"; + stream << indent_str; + stream << " */ "; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << sec_mml; + stream << ") { "; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg0, %0 \\n\"::\"r\"(PMP_L));"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg0, %0 \\n\"::\"r\"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg1, %0 \\n\"::\"r\"((PMP_L << 8) | (PMP_L << 16)));"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " wval = ("; + stream << sec_rlb; + stream << " ? MSECCFG_RLB : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << sec_mml; + stream << " ? MSECCFG_MML : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << sec_mmwp; + stream << " ? 
MSECCFG_MMWP : 0);"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw 0x747, %1 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrr %0, 0x747 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(wval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * pre_sec_mml means cfg0 locked"; + stream << "\n"; + stream << indent_str; + stream << " * pmp_lock means cfg2/3 locked"; + stream << "\n"; + stream << indent_str; + stream << " * sec_mml is the test coverage hole just mentioned"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " if (("; + stream << pre_sec_mml; + stream << " || "; + stream << pmp_lock; + stream << " || "; + stream << sec_mml; + stream << ") "; + stream << "\n"; + stream << indent_str; + stream << " && "; + stream << lock_bypass; + stream << " == 0) {"; + stream << "\n"; + stream << indent_str; + stream << " expected_val &= ~MSECCFG_RLB;"; + stream << "\n"; + stream << indent_str; + stream << " } "; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pre_sec_mml; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " expected_val |= MSECCFG_MML;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pre_sec_mmwp; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " expected_val |= MSECCFG_MMWP;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (expected_val != rval) actual_seccfg_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult() {"; + stream << "\n"; + stream << indent_str; + stream << " int ret = 0;"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_seccfg_fail != actual_seccfg_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (expected_pmpaddr_fail != actual_pmpaddr_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 2;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (expected_pmpcfg_fail != actual_pmpcfg_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " exit(ret); "; + stream << "\n"; + stream << 
indent_str;
+    stream << "}";
+    stream << "\n";
+    stream << indent_str;
+    stream << "\n";
+    stream << indent_str;
+    stream << "int main() {";
+    stream << "\n";
+    stream << indent_str;
+    stream << "  // assert in M mode";
+    stream << "\n";
+    stream << indent_str;
+    stream << "  set_cfg();";
+    stream << "\n";
+    stream << indent_str;
+    stream << "\n";
+    stream << indent_str;
+    stream << "  checkTestResult();";
+    stream << "\n";
+    stream << indent_str;
+    stream << "  return 0; // assert 0";
+    stream << "\n";
+    stream << indent_str;
+    stream << "}";
+    stream << "\n";
+    stream << indent_str;
+  }
+};
+
+#endif // PMP_CSR_1_GEN_CLASS_H
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel
new file mode 100644
index 00000000..7d10c502
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel
@@ -0,0 +1,344 @@
+
+/*
+ * @tag@
+ * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.
+ *
+ * This test program is expected to start executing from M mode.
+ * That makes it easier for us to deal with PMP exceptions during the test.
+ *
+ * Remarks:
+ *   - RW=01 not covered. U/M mode share will be tested separately
+ *   - RLB is always 0. CSR access control will be tested separately
+ *
+ * @@changed 2020-Mar-2 soberl
+ *  For RWXL + MML, need to separate R and W combinations.
+ *  Skip RW=01 (share mode) at the generator driver side.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R     0x01
+#define PMP_W     0x02
+#define PMP_X     0x04
+#define PMP_A     0x18
+#define PMP_L     0x80
+#define PMP_SHIFT 2
+
+#define PMP_OFF   0x0
+#define PMP_TOR   0x08
+#define PMP_NA4   0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML  0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB  0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share a single cfg for M mode.
+ * When @@set_sec_mml@@ is set, it must be 0, otherwise an unexpected exception occurs.
+ */
+#define M_MODE_RWX @m_mode_rwx:int@
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END   0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_rw_fail = @expected_rw_fail:int@;
+static unsigned actual_rw_fail = 0;
+
+static const unsigned long expected_x_fail = @expected_x_fail:int@;
+static unsigned actual_x_fail = 0;
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * currently simply skip to the next instruction
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {
+        asm volatile ("nop");
+        actual_x_fail = 1;
+        checkTestResult();
+    } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+        reg_t addr;
+        asm volatile ("csrr %0, mtval\n" : "=r"(addr));
+        if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {
+            actual_rw_fail = 1;
+            return epc + 4;
+        }
+
+        if (addr == FAKE_ADDRESS) {
+            asm volatile ("nop");
+            asm volatile ("nop");
+            checkTestResult();
+        }
+    }
+
+    printf("cause = %ld, epc = 0x%lx\n", cause, epc);
+    tohost_exit(1337);
+}
+
+
+// switch (mret) to U mode and resume at the next PC
+static void switch_to_U() {
+    reg_t tmp;
+    asm volatile (
+        "li %0, %1\n"
+        "\tcsrc mstatus, %0\n"  // clear MPP so that mret drops to U mode
+        "\tla %0, try_access_umode \n"
+        "\tcsrw mepc, %0\n"
+        "\tli sp, %2\n"
+        "\tmret\n"
+        : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory");
+}
+
+static void switch_mode() {
+#if @switch_u_mode:int@
+    switch_to_U();
+#endif
+}
+
+__attribute ((noinline))
+static void target_foo() {
+    asm volatile ("nop");
+
+    actual_x_fail = 0;
+}
+
+/*
+ * avoid accessing actual_x_fail, which lies in M mode memory
+ */
+__attribute ((section(".text_test_foo"), noinline))
+static void target_foo_umode() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+static volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+static int detect_pmp_granularity() {
+    unsigned int granule;
+    unsigned long int temp_reg;
+    unsigned long int all_ones = ~0x0UL;
+
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory");
+    asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg));
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory");
+
+    int g = 2;
+    for (uintptr_t i = 1; i; i <<= 1) {
+        if ((temp_reg & i) != 0)
+            break;
+        g++;
+    }
+    granule = 1UL << g;
+
+    return granule;
+}
+
+static int mismatch_addr_offset(int granule_size) {
+    unsigned int addr_offset = @pmp_addr_offset:int@;
+
+    if (addr_offset == 0x0) {
+        return 0x0;
+    }
+    else {
+        unsigned int mismatch_offset = granule_size;
+        while (mismatch_offset < addr_offset) {
+            mismatch_offset = mismatch_offset << 0x1;
+        }
+        return mismatch_offset;
+    }
+}
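[Editor's note: detect_pmp_granularity() above uses the standard probe: write all ones to pmpaddr0, read it back, and find the lowest set bit; bits below the granule read back as zero, and a granule of 2^(G+2) bytes leaves the low G bits clear. A minimal stand-alone restatement of that rule (a hypothetical helper, not part of the sources):

    /* granule = 1 << (2 + number of trailing zero bits in the read-back). */
    static unsigned long granule_from_readback(unsigned long readback) {
        unsigned g = 2;
        for (unsigned long i = 1; i && !(readback & i); i <<= 1)
            g++;
        return 1UL << g;
    }

    /* granule_from_readback(~0UL)       == 4    (Spike's default)
     * granule_from_readback(~0UL << 3)  == 32
     * granule_from_readback(~0UL << 10) == 4096 */
]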
+
+/*
+ * On processor_t::reset():
+ *  - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *    set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1
+    /*
+     * set MSECCFG_RLB to avoid being locked
+     */
+    unsigned rlb_value = MSECCFG_RLB;
+    asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value));
+#endif
+
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for the base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     *
+     * Here @@pmp_addr_offset:int@@ is to create an address mismatch
+     * And @@create_pmp_cfg:int@@ is to create a cfg mismatch.
+     */
+
+    unsigned int mismatch_offset = @pmp_addr_offset:int@;
+
+    if (mismatch_offset != 0x0) {
+        volatile int pmp_granularity = detect_pmp_granularity();
+        mismatch_offset = mismatch_addr_offset(pmp_granularity);
+    }
+
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    // Only true for Spike
+//    asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0));
+//    if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {
+//        exit(cfg0);
+//    }
+
+    if (@set_sec_mml:int@) { // need to set the L bit for M mode code like trap handling
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24;  // for U_MEM
+#if @create_pmp_cfg:int@
+    cfg0 |= ( (@pmp_r:int@ ? PMP_R : 0)
+            | (@pmp_w:int@ ? PMP_W : 0)
+            | (@pmp_x:int@ ? PMP_X : 0)
+            | PMP_TOR | (@pmp_l:int@ ? PMP_L : 0)) << 16;
+#endif
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                  :
+                  : "r"(cfg1)
+                  : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                  :
+                  : "r"(cfg0)
+                  : "memory");
+
+    // set proc->state.mseccfg, for MML/MMWP
+    const unsigned seccfg_bits = (@set_sec_mml:int@ ? MSECCFG_MML : 0) | (@set_sec_mmwp:int@ ? MSECCFG_MMWP : 0);
+    if (seccfg_bits) {
+        asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits));
+    }
+
+    // currently a dummy, since the TLB is flushed on set_csr to mseccfg
+    asm volatile ("fence.i \n");
+}
+
+// from the pmp_ok() side, W/R/X are similar
+__attribute ((noinline))
+static void try_access() {
+#if TEST_RW
+    target_arr[0] += 1;
+    const unsigned long delta = (unsigned long)0x1020304005060708ULL;
+    *(long *)target_arr += delta;
+
+    if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {
+        actual_rw_fail = 1;
+    }
+#endif
+
+#if TEST_FETCH
+    actual_x_fail = 1;  // reset inside target_foo()
+    target_foo();
+#endif
+}
+
+// in case MML is set, printf cannot be used in U mode
+__attribute ((section(".text_umode")))
+void try_access_umode() {
+#if TEST_RW
+    target_arr[0] += 1;
+//    const unsigned long delta = 0x1020304005060708UL;
+//    *(long *)target_arr += delta;
+
+//    if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {
+//        actual_rw_fail = 1;
+//    }
+#endif
+
+#if TEST_FETCH
+    target_foo_umode();
+#endif
+
+    /*
+     * switch to M mode by invoking a write access fault for a special address.
+     */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
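[Editor's note: a sketch (not part of the sources) of the U-mode exit path that try_access_umode() and handle_trap() implement together:

    /*   try_access_umode()          // U mode
     *     -> store to FAKE_ADDRESS  // no PMP entry matches => access fault
     *     -> handle_trap()          // M mode, mtval == FAKE_ADDRESS
     *     -> checkTestResult()      // reports via exit(), never returns
     *
     * The pointer is volatile so the compiler cannot drop the faulting store. */
]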
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_rw_fail != actual_rw_fail) {
+        ret += 1;
+    }
+
+    if (expected_x_fail != actual_x_fail) {
+        ret += 2;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    switch_mode();  // in case we switch to U mode, branch to try_access_umode directly
+
+    try_access();
+
+    checkTestResult();
+    return 0; // assert 0
+}
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h
new file mode 100644
index 00000000..2e004f6e
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h
@@ -0,0 +1,1177 @@
+/*
+ * File automatically generated by
+ * gengen 1.4.2 by Lorenzo Bettini
+ * http://www.gnu.org/software/gengen
+ */
+
+#ifndef PMP_OK_1_GEN_CLASS_H
+#define PMP_OK_1_GEN_CLASS_H
+
+#include <string>
+#include <iostream>
+
+using std::string;
+using std::ostream;
+
+class pmp_ok_1_gen_class
+{
+ protected:
+  int create_pmp_cfg;
+  int expected_rw_fail;
+  int expected_x_fail;
+  int m_mode_rwx;
+  int pmp_addr_offset;
+  int pmp_l;
+  int pmp_r;
+  int pmp_w;
+  int pmp_x;
+  int set_sec_mml;
+  int set_sec_mmwp;
+  int switch_u_mode;
+  string tag;
+
+ public:
+  pmp_ok_1_gen_class() :
+    create_pmp_cfg (0), expected_rw_fail (0), expected_x_fail (0), m_mode_rwx (0), pmp_addr_offset (0), pmp_l (0), pmp_r (0), pmp_w (0), pmp_x (0), set_sec_mml (0), set_sec_mmwp (0), switch_u_mode (0)
+  {
+  }
+
+  pmp_ok_1_gen_class(int _create_pmp_cfg, int _expected_rw_fail, int _expected_x_fail, int _m_mode_rwx, int _pmp_addr_offset, int _pmp_l, int _pmp_r, int _pmp_w, int _pmp_x, int _set_sec_mml, int _set_sec_mmwp, int _switch_u_mode, const string &_tag) :
+    create_pmp_cfg (_create_pmp_cfg), expected_rw_fail (_expected_rw_fail), expected_x_fail (_expected_x_fail), m_mode_rwx (_m_mode_rwx), pmp_addr_offset (_pmp_addr_offset), pmp_l (_pmp_l), pmp_r (_pmp_r), pmp_w (_pmp_w), pmp_x (_pmp_x), set_sec_mml (_set_sec_mml), set_sec_mmwp (_set_sec_mmwp), switch_u_mode (_switch_u_mode), tag (_tag)
+  {
+  }
+
+  void set_create_pmp_cfg(int _create_pmp_cfg)
+  {
+    create_pmp_cfg = _create_pmp_cfg;
+  }
+
+  void set_expected_rw_fail(int _expected_rw_fail)
+  {
+    expected_rw_fail = _expected_rw_fail;
+  }
+
+  void set_expected_x_fail(int _expected_x_fail)
+  {
+    expected_x_fail = _expected_x_fail;
+  }
+
+  void set_m_mode_rwx(int _m_mode_rwx)
+  {
+    m_mode_rwx = _m_mode_rwx;
+  }
+
+  void set_pmp_addr_offset(int _pmp_addr_offset)
+  {
+    pmp_addr_offset = _pmp_addr_offset;
+  }
+
+  void set_pmp_l(int _pmp_l)
+  {
+    pmp_l = _pmp_l;
+  }
+
+  void set_pmp_r(int _pmp_r)
+  {
+    pmp_r = _pmp_r;
+  }
+
+  void set_pmp_w(int _pmp_w)
+  {
+    pmp_w = _pmp_w;
+  }
+
+  void set_pmp_x(int _pmp_x)
+  {
+    pmp_x = _pmp_x;
+  }
+
+  void set_set_sec_mml(int _set_sec_mml)
+  {
+    set_sec_mml = _set_sec_mml;
+  }
+
+  void set_set_sec_mmwp(int _set_sec_mmwp)
+  {
+    set_sec_mmwp = _set_sec_mmwp;
+  }
+
+  void set_switch_u_mode(int _switch_u_mode)
+  {
+    switch_u_mode = _switch_u_mode;
+  }
+
+  void set_tag(const string &_tag)
+  {
+    tag = _tag;
+  }
+
+  void generate_pmp_ok_1(ostream &stream, unsigned int indent = 0)
+  {
+    string indent_str (indent, ' ');
+    indent = 0;
+
+    stream << "\n";
+    stream << indent_str;
+    stream << "/*";
+    stream << "\n";
+    stream << indent_str;
+    stream << " * ";
+    stream << tag;
+    stream << "\n";
+    stream << indent_str;
+    stream << " * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel.";
+    stream << "\n";
+    stream << 
indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * This test program is expected to start executed from M mode."; + stream << "\n"; + stream << indent_str; + stream << " * That will be easier for us to deal with pmp exception for test."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Remarks:"; + stream << "\n"; + stream << indent_str; + stream << " * - RW=01 not covered. U/M mode share will be tested separately"; + stream << "\n"; + stream << indent_str; + stream << " * - RLB is always 0. CSR access control will be tested separately"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "@"; + stream << "changed 2020-Mar-2 soberl"; + stream << "\n"; + stream << indent_str; + stream << " * For RWXL + MML, need to separate R and W combinations."; + stream << "\n"; + stream << indent_str; + stream << " * Skip RW=01 (share mode) at generator driver side."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Macros from encoding.h"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define MSTATUS_MPP 0x00001800"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_R 0x01"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_W 0x02"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_X 0x04"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_A 0x18"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_L 0x80"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_SHIFT 2"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_OFF 0x0"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_TOR 0x08"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NA4 0x10"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NAPOT 0x18"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MML 0x1"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MMWP 0x2"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_RLB 0x4"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_RW 1"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_FETCH 1"; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Whether rwx share single cfg for M mode"; + stream << "\n"; + stream << indent_str; + stream << " * When "; + stream << "@"; + stream << "set_sec_mml"; + stream << "@"; + stream << " set, it must be 0, otherwise unexpected exception"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define M_MODE_RWX "; + stream << m_mode_rwx; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_LOAD_ACCESS 0x5"; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_STORE_ACCESS 0x7"; + stream << "\n"; + stream << 
indent_str; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long reg_t;"; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long uintptr_t;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * functions from syscalls.c"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#if PRINTF_SUPPORTED"; + stream << "\n"; + stream << indent_str; + stream << "int printf(const char* fmt, ...);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#define printf(...)"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "void __attribute__((noreturn)) tohost_exit(uintptr_t code);"; + stream << "\n"; + stream << indent_str; + stream << "void exit(int code);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * local status"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_START 0x200000"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_END 0x240000"; + stream << "\n"; + stream << indent_str; + stream << "#define U_MEM_END (TEST_MEM_END + 0x10000)"; + stream << "\n"; + stream << indent_str; + stream << "#define FAKE_ADDRESS 0x10000000"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_rw_fail = "; + stream << expected_rw_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_rw_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_x_fail = "; + stream << expected_x_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult(void);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * override syscalls.c."; + stream << "\n"; + stream << indent_str; + stream << " * currently simply skip to nexp instruction"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])"; + stream << "\n"; + stream << indent_str; + stream << "{"; + stream << "\n"; + stream << indent_str; + stream << " if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t addr;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, mtval\\n\" : \"=r\"(addr));"; + stream << "\n"; + stream << indent_str; + stream << 
" if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " return epc + 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 8; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (addr == FAKE_ADDRESS) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " printf(\"cause = %ld, epc = 0x%lx\\n\", cause, epc);"; + stream << "\n"; + stream << indent_str; + stream << " tohost_exit(1337);"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// switch (eret) to U mode and resume next PC"; + stream << "\n"; + stream << indent_str; + stream << "static void switch_to_U() {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t tmp;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile ("; + stream << "\n"; + stream << indent_str; + stream << " \"li %0, %1\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrc mstatus, t0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tla %0, try_access_umode \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrw mepc, %0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tli sp, %2\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tmret\\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(tmp) : \"n\"(MSTATUS_MPP), \"n\"(U_MEM_END) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void switch_mode() {"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << switch_u_mode; + stream << "\n"; + stream << indent_str; + stream << " switch_to_U();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void target_foo() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * avoid to access actual_x_fail lies in M mode"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_test_foo\"), noinline))"; + stream << "\n"; + stream 
<< indent_str; + stream << "static void target_foo_umode() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_test_arr\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "static volatile unsigned char target_arr[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static int detect_pmp_granularity(){"; + stream << "\n"; + stream << indent_str; + stream << " unsigned int granule;"; + stream << "\n"; + stream << indent_str; + stream << " unsigned long int temp_reg;"; + stream << "\n"; + stream << indent_str; + stream << " unsigned long int all_ones = ~0x0UL;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"(all_ones) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, pmpaddr0 \\n\" : \"=r\"(temp_reg));"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"(0x0) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " int g = 2;"; + stream << "\n"; + stream << indent_str; + stream << " for(uintptr_t i = 1; i; i<<=1) {"; + stream << "\n"; + stream << indent_str; + stream << " if((temp_reg & i) != 0)"; + stream << "\n"; + stream << indent_str; + stream << " break;"; + stream << "\n"; + stream << indent_str; + stream << " g++;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " granule = 1UL << g;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " return granule;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static int mismatch_addr_offset(int granule_size){"; + stream << "\n"; + stream << indent_str; + stream << " unsigned int addr_offset = "; + stream << pmp_addr_offset; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (addr_offset == 0x0){"; + stream << "\n"; + stream << indent_str; + stream << " return 0x0;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " else {"; + stream << "\n"; + stream << indent_str; + stream << " unsigned int mismatch_offset = granule_size;"; + stream << "\n"; + stream << indent_str; + stream << " while (mismatch_offset < addr_offset){"; + stream << "\n"; + stream << indent_str; + stream << " mismatch_offset = mismatch_offset << 0x1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " return mismatch_offset;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * On processor_t::reset():"; + stream << "\n"; + stream << indent_str; + stream << " * - 
set_csr(CSR_PMPADDR0, ~reg_t(0));"; + stream << "\n"; + stream << indent_str; + stream << " * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "static void set_cfg() {"; + stream << "\n"; + stream << indent_str; + stream << "#if 1"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * set MSECCFG_RLB to avoid locked entries"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " unsigned rlb_value = MSECCFG_RLB;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(rlb_value));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR."; + stream << "\n"; + stream << indent_str; + stream << " * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry."; + stream << "\n"; + stream << indent_str; + stream << " * Also use pmp3cfg for fixed U mode (U_MEM)."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Here "; + stream << "@"; + stream << "pmp_addr_offset:int"; + stream << "@"; + stream << " is to create an address mismatch"; + stream << "\n"; + stream << indent_str; + stream << " * And "; + stream << "@"; + stream << "create_pmp_cfg:int"; + stream << "@"; + stream << " is to create a cfg mismatch."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " unsigned int mismatch_offset = "; + stream << pmp_addr_offset; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (mismatch_offset != 0x0){"; + stream << "\n"; + stream << indent_str; + stream << " volatile int pmp_granularity = detect_pmp_granularity();"; + stream << "\n"; + stream << indent_str; + stream << " mismatch_offset = mismatch_addr_offset(pmp_granularity);"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr3, %0 \\n\" :: \"r\"(U_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr2, %0 \\n\" :: \"r\"(TEST_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr1, %0 \\n\" :: \"r\"((TEST_MEM_START + mismatch_offset) >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"((TEST_MEM_START >> 3) - 1) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr6, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\"); // for data"; +
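For reference, a minimal sketch (not part of the patch) of the pmpaddr encodings that the streamed set_cfg() relies on: TOR/NA4 entries hold bits [XLEN+1:2] of the byte address, hence the ">> 2" writes above, while the NAPOT write "(TEST_MEM_START >> 3) - 1" packs the power-of-two region [0, TEST_MEM_START) into a single entry. Helper names are illustrative.

#include <cstdint>

// TOR/NA4 encoding: drop the two low address bits.
static inline uintptr_t pmpaddr_tor(uintptr_t byte_addr) {
    return byte_addr >> 2;
}

// NAPOT encoding: base must be size-aligned, size a power of two >= 8 bytes;
// k trailing one bits in the value select a region of 2^(k+3) bytes.
static inline uintptr_t pmpaddr_napot(uintptr_t base, uintptr_t size) {
    return (base >> 2) | ((size >> 3) - 1);
}

// pmpaddr_napot(0, 0x200000) == (0x200000 >> 3) - 1, matching the pmpaddr0
// write streamed above for the M_MODE_RWX case.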
stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr5, %0 \\n\" :: \"r\"(0x80010000 >> 2) : \"memory\"); // for code"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr4, %0 \\n\" :: \"r\"(0x80000000 >> 2) : \"memory\"); // addr start"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = PMP_OFF;"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // Only true for Spike"; + stream << "\n"; + stream << indent_str; + stream << "// asm volatile (\"csrr %0, pmpcfg0\\n\":\"=r\"(cfg0)); "; + stream << "\n"; + stream << indent_str; + stream << "// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {"; + stream << "\n"; + stream << indent_str; + stream << "// exit(cfg0);"; + stream << "\n"; + stream << indent_str; + stream << "// }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << set_sec_mml; + stream << ") { // need to set L bit for M mode code like trap_handling"; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= PMP_L;"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " cfg1 |= ((PMP_L << 8) | (PMP_L << 16));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << create_pmp_cfg; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= ( ("; + stream << pmp_r; + stream << " ? PMP_R : 0)"; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_w; + stream << " ? PMP_W : 0)"; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_x; + stream << " ? PMP_X : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | PMP_TOR | ("; + stream << pmp_l; + stream << " ? 
PMP_L : 0)) << 16;"; + stream << "\n"; + stream << indent_str; + stream << "#endif "; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (cfg1 << 32);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg1, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg1)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // set proc->state.mseccfg, for MML/MMWP"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned seccfg_bits = ("; + stream << set_sec_mml; + stream << " ? MSECCFG_MML : 0) | ("; + stream << set_sec_mmwp; + stream << " ? MSECCFG_MMWP : 0);"; + stream << "\n"; + stream << indent_str; + stream << " if (seccfg_bits) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(seccfg_bits));"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // currently dummy since tlb flushed when set_csr on mseccfg"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"fence.i \\n\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// from pmp_ok() side,W/R/X is similar"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void try_access() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned long delta = (unsigned long)0x1020304005060708ULL;"; + stream << "\n"; + stream << indent_str; + stream << " *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << " actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " 
actual_x_fail = 1; // reset inside target_foo()"; + stream << "\n"; + stream << indent_str; + stream << " target_foo();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// in case mml set, printf cannot be used in U mode"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_umode\")))"; + stream << "\n"; + stream << indent_str; + stream << "void try_access_umode() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << "// const unsigned long delta = 0x1020304005060708UL;"; + stream << "\n"; + stream << indent_str; + stream << "// *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << "// actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << "// }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " target_foo_umode();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * switch to M mode by invoking a write access fault for special address."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);"; + stream << "\n"; + stream << indent_str; + stream << " *p = 1;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult() {"; + stream << "\n"; + stream << indent_str; + stream << " int ret = 0;"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_rw_fail != actual_rw_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (expected_x_fail != actual_x_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 2;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " exit(ret); "; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "int main() {"; + stream << "\n"; + stream << indent_str; + stream << " // assert in M mode"; + stream << "\n"; + stream << indent_str; + stream << " set_cfg();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " switch_mode(); // in case of switching to U mode, branch to
try_access_umode directly"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " try_access();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " return 0; // assert 0"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + } +}; + +#endif // PMP_OK_1_GEN_CLASS_H diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel new file mode 100644 index 00000000..ca23eb30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel @@ -0,0 +1,295 @@ + +/* + * @tag@ + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - @typex:int@) +#define TEST_FETCH (@typex:int@) +/* + * Whether rwx share single cfg for M mode + * When @@set_sec_mml@@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX @m_mode_rwx:int@ + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = @expected_r_fail:int@; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = @expected_w_fail:int@; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = @expected_x_fail:int@; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to next instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid accessing actual_x_fail, which lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked entries + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before setting MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after setting MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((@pmp_r:int@ ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (@pmp_x:int@ ? PMP_X : 0) + | (@pmp_l:int@ ?
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if @enable_umode_test:int@ + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h new file mode 100644 index 00000000..7fb51808 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h @@ -0,0 +1,997 @@ +/* + * File automatically generated by + * gengen 1.4.2 by Lorenzo Bettini + * http://www.gnu.org/software/gengen + */ + +#ifndef PMP_OK_SHARE_1_GEN_CLASS_H +#define PMP_OK_SHARE_1_GEN_CLASS_H + +#include +#include + +using std::string; +using std::ostream; + +class pmp_ok_share_1_gen_class +{ + protected: + int enable_umode_test; + int expected_r_fail; + int expected_w_fail; + int expected_x_fail; + int m_mode_rwx; + int pmp_l; + int pmp_r; + int pmp_x; + string tag; + int typex; + + public: + pmp_ok_share_1_gen_class() : + enable_umode_test (0), expected_r_fail (0), expected_w_fail (0), expected_x_fail (0), m_mode_rwx (0), pmp_l (0), pmp_r (0), pmp_x (0), typex (0) + { + } + + pmp_ok_share_1_gen_class(int _enable_umode_test, int _expected_r_fail, int _expected_w_fail, int _expected_x_fail, int _m_mode_rwx, int _pmp_l, int _pmp_r, int _pmp_x, const string &_tag, int _typex) : + enable_umode_test (_enable_umode_test), expected_r_fail (_expected_r_fail), expected_w_fail (_expected_w_fail), expected_x_fail (_expected_x_fail), m_mode_rwx (_m_mode_rwx), pmp_l (_pmp_l), pmp_r (_pmp_r), pmp_x (_pmp_x), tag (_tag), typex (_typex) + { + } + + void set_enable_umode_test(int _enable_umode_test) + { + enable_umode_test = _enable_umode_test; + } + + void set_expected_r_fail(int _expected_r_fail) + { + expected_r_fail = _expected_r_fail; + } + + void set_expected_w_fail(int _expected_w_fail) + { + expected_w_fail = _expected_w_fail; + } + + void set_expected_x_fail(int 
_expected_x_fail) + { + expected_x_fail = _expected_x_fail; + } + + void set_m_mode_rwx(int _m_mode_rwx) + { + m_mode_rwx = _m_mode_rwx; + } + + void set_pmp_l(int _pmp_l) + { + pmp_l = _pmp_l; + } + + void set_pmp_r(int _pmp_r) + { + pmp_r = _pmp_r; + } + + void set_pmp_x(int _pmp_x) + { + pmp_x = _pmp_x; + } + + void set_tag(const string &_tag) + { + tag = _tag; + } + + void set_typex(int _typex) + { + typex = _typex; + } + + void generate_pmp_ok_share_1(ostream &stream, unsigned int indent = 0) + { + string indent_str (indent, ' '); + indent = 0; + + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << tag; + stream << "\n"; + stream << indent_str; + stream << " * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * This test program tests pmp_ok() in share mode (RW=01)."; + stream << "\n"; + stream << indent_str; + stream << " * Based on other test cases for mseccfg sticky bits, this test expects the following:"; + stream << "\n"; + stream << indent_str; + stream << " * - RW = 01. For RW != 01, fewer combinations show it fail."; + stream << "\n"; + stream << indent_str; + stream << " * - MML set"; + stream << "\n"; + stream << indent_str; + stream << " * - Region matched."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Remarks:"; + stream << "\n"; + stream << indent_str; + stream << " * - "; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Macros from encoding.h"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define MSTATUS_MPP 0x00001800"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_R 0x01"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_W 0x02"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_X 0x04"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_A 0x18"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_L 0x80"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_SHIFT 2"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_OFF 0x0"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_TOR 0x08"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NA4 0x10"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NAPOT 0x18"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MML 0x1"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MMWP 0x2"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_RLB 0x4"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_RW (1 - "; + stream << typex; + stream << ")"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_FETCH ("; + stream << typex; + stream << ")"; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Whether rwx share single
cfg for M mode"; + stream << "\n"; + stream << indent_str; + stream << " * When "; + stream << "@"; + stream << "set_sec_mml"; + stream << "@"; + stream << " set, it must be 0, otherwise unexpected exception"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define M_MODE_RWX "; + stream << m_mode_rwx; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_LOAD_ACCESS 0x5"; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_STORE_ACCESS 0x7"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long reg_t;"; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long uintptr_t;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * functions from syscalls.c"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#if PRINTF_SUPPORTED"; + stream << "\n"; + stream << indent_str; + stream << "int printf(const char* fmt, ...);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#define printf(...)"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "void __attribute__((noreturn)) tohost_exit(uintptr_t code);"; + stream << "\n"; + stream << indent_str; + stream << "void exit(int code);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * local status"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_START 0x200000"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_END 0x240000"; + stream << "\n"; + stream << indent_str; + stream << "#define U_MEM_END (TEST_MEM_END + 0x10000)"; + stream << "\n"; + stream << indent_str; + stream << "#define FAKE_ADDRESS 0x10000000"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_r_fail = "; + stream << expected_r_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_r_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_w_fail = "; + stream << expected_w_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_w_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_x_fail = "; + stream << expected_x_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult(void);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * override syscalls.c."; + stream << "\n"; + stream << indent_str; + stream << " * currently simply skip to nexp instruction"; + stream << "\n"; + 
stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])"; + stream << "\n"; + stream << indent_str; + stream << "{"; + stream << "\n"; + stream << indent_str; + stream << " if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t addr;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, mtval\\n\" : \"=r\"(addr));"; + stream << "\n"; + stream << indent_str; + stream << " if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " if (cause == CAUSE_LOAD_ACCESS)"; + stream << "\n"; + stream << indent_str; + stream << " actual_r_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " else "; + stream << "\n"; + stream << indent_str; + stream << " actual_w_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " return epc + 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 8; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (addr == FAKE_ADDRESS) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " printf(\"cause = %ld, epc = 0x%lx\\n\", cause, epc);"; + stream << "\n"; + stream << indent_str; + stream << " tohost_exit(1337);"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void switch_mode_access() {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t tmp;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile ("; + stream << "\n"; + stream << indent_str; + stream << " \"li %0, %1\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrc mstatus, t0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tla %0, try_access_umode \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrw mepc, %0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tli sp, %2\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tmret\\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(tmp) : \"n\"(MSTATUS_MPP), \"n\"(U_MEM_END) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_test_foo\"), noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void target_foo() {"; + stream << "\n"; + 
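One review-style note on the switch_mode_access() asm streamed just above: it loads MSTATUS_MPP into operand %0 but then clears mstatus through the hard-coded register t0, so it only behaves as intended when the compiler happens to allocate t0 for %0. A constraint-safe sketch of the same M-to-U handoff (assuming the skeleton's MSTATUS_MPP and U_MEM_END values):

extern "C" void try_access_umode(void);

static void switch_to_u_sketch(void) {
    unsigned long tmp;
    asm volatile (
        "li %0, %1\n"
        "\tcsrc mstatus, %0\n"      // clear MPP so mret drops to U mode
        "\tla %0, try_access_umode\n"
        "\tcsrw mepc, %0\n"         // resume point for mret
        "\tli sp, %2\n"             // fresh U-mode stack at the top of U_MEM
        "\tmret\n"
        : "=r"(tmp)
        : "n"(0x00001800 /* MSTATUS_MPP */), "n"(0x250000 /* U_MEM_END */)
        : "memory");
}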
stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * avoid to access actual_x_fail lies in M mode"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void target_foo_umode() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_test_arr\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "static volatile unsigned char target_arr[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * On processor_t::reset():"; + stream << "\n"; + stream << indent_str; + stream << " * - set_csr(CSR_PMPADDR0, ~reg_t(0));"; + stream << "\n"; + stream << indent_str; + stream << " * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "static void set_cfg() {"; + stream << "\n"; + stream << indent_str; + stream << "#if 1"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * set MSECCFG_RLB to avoid locked"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " unsigned rlb_value = MSECCFG_RLB;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(rlb_value));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR."; + stream << "\n"; + stream << indent_str; + stream << " * Then use pmp2cfg for TEST_MEM. 
Both test code and data share a PMP entry."; + stream << "\n"; + stream << indent_str; + stream << " * Also use pmp3cfg for fixed U mode (U_MEM)."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr3, %0 \\n\" :: \"r\"(U_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr2, %0 \\n\" :: \"r\"(TEST_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr1, %0 \\n\" :: \"r\"((TEST_MEM_START) >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"((TEST_MEM_START >> 3) - 1) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr6, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\"); // for data"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr5, %0 \\n\" :: \"r\"(0x80010000 >> 2) : \"memory\"); // for code"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr4, %0 \\n\" :: \"r\"(0x80000000 >> 2) : \"memory\"); // addr start"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = PMP_OFF;"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " // need to set L bit for M mode before setting MML"; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= PMP_L;"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " cfg1 |= ((PMP_L << 8) | (PMP_L << 16));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 8; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (cfg1 << 32);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg1, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg1)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream <<
"\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " // set proc->state.mseccfg, for MML/MMWP"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(seccfg_bits));"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // after set MML, RW=01 is possible"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (("; + stream << pmp_r; + stream << " ? PMP_R : 0) // for TEST_MEM"; + stream << "\n"; + stream << indent_str; + stream << " | PMP_W"; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_x; + stream << " ? PMP_X : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_l; + stream << " ? PMP_L : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | PMP_TOR) << 16;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // currently dummy since tlb flushed when set_csr on mseccfg"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"fence.i \\n\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// from pmp_ok() side,W/R/X is similar"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void try_access() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned long delta = (unsigned long)0x1020304005060708ULL;"; + stream << "\n"; + stream << indent_str; + stream << " *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (actual_r_fail == 0 && actual_w_fail == 0) {"; + stream << "\n"; + stream << indent_str; + stream << " if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << " actual_r_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " actual_w_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 1; // reset inside target_foo()"; + stream << "\n"; + stream << indent_str; + stream << " target_foo();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + 
stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// in case mml set, printf cannot be used in U mode"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_umode\")))"; + stream << "\n"; + stream << indent_str; + stream << "void try_access_umode() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << "// const unsigned long delta = 0x1020304005060708UL;"; + stream << "\n"; + stream << indent_str; + stream << "// *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << "// actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << "// }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " target_foo_umode();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * switch to M mode by invoking a write access fault for special address."; + stream << "\n"; + stream << indent_str; + stream << " */ "; + stream << "\n"; + stream << indent_str; + stream << " volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);"; + stream << "\n"; + stream << indent_str; + stream << " *p = 1;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult() {"; + stream << "\n"; + stream << indent_str; + stream << " int ret = 0;"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_r_fail != actual_r_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_w_fail != actual_w_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 2;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_x_fail != actual_x_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " exit(ret); "; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "int main() {"; + stream << "\n"; + stream << indent_str; + stream << " // assert in M mode"; + stream << "\n"; + stream << indent_str; + stream << " set_cfg();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " try_access();"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << enable_umode_test; + stream << 
"\n"; + stream << indent_str; + stream << " switch_mode_access(); // access in umode and report final result"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << " return 0; // assert 0"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + } +}; + +#endif // PMP_OK_SHARE_1_GEN_CLASS_H diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc new file mode 100644 index 00000000..acdb1fbe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc @@ -0,0 +1,30 @@ + +ifeq ($(shell uname),Linux) + export PLATFORM := Linux +else +ifeq ($(findstring CYGWIN,$(shell uname)),CYGWIN) + export PLATFORM := CygWin +else #MINGW + export PLATFORM := MinGW +endif +endif + +PWD := $(shell pwd) +gengen = ${PWD}/${dir}/tool/gengen-1.4.2/build/${PLATFORM}/bin/gengen + +default: + for skel in ${cc_skel_list}; do\ + $(gengen) -i $$skel.cc_skel --file-name $$skel.h --gen-name $$skel || exit 1; \ + done +gen: + -rm -f ../*.c + g++ -O2 ${cc_file}.cc -o a.out + ./a.out + find .. -name "*.c" | wc -l + +clean: $(OPT_CLEAN) + -rm -f ../*.c + -rm -f a.out + for skel in ${cc_skel_list}; do\ + rm -f $$skel.h || exit 1; \ + done diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/gengen b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/gengen new file mode 100755 index 0000000000000000000000000000000000000000..e5b4e95c2f0c400418cafb0f80a173db567ec8a1 GIT binary patch literal 1901792 zcmb4s3w#ts^8bcpB`CO2L8F33SWi&V1OW*UO<*GvTrdJE3YtJjAjl&o8$d+~W>*=< zNJP9tMMdQ}U#Fbr5EPT}jHq`8)DuMo#m5X0!52h9^Z$NpdN#AG-u?cYPjK{gkS7`6=Xm^{V}SMI-=WJ^ zDd*a#SUcBb#U&BfB&ItkM2BoUp{|ZJ=jv$4E9JF+vQ^T)=2Vcg$Kza~xxg$(T{V@j zRTK%{xdxSDWhV$4Uulw{dMr0SWb+*#vdcMFr(Vw0sUPdc|L&@O-8*ChBAm^O;@*B0 zTL7Q`MQ94stzX&&TfZDea=`X=icdg~b9KsHiE`BQ|N3$Cyl_V8EjM7<^ z^ZL#kG_dc${ufltzM!Azp8|%o-H6dw32SOaIh_RZcxbMP_@^G^d0%7tS!bV>G_0!M zg`TREgpM-x}wHW^;_@9pdnfT{t7XHg5Wq+09 z(sAANS^wBSWt?Z4ca`_E-7lRzv-9xde>${%+rndaJlef%?ag^NuF3q)=$^kM`KRO4 z=A;Lk-fFJ7Hv8LqXSThb|MiK#bi4i0|5U6$X7HF`=)I{|O-kvytn=_kieB6DP0FQ@ z_UUnt<8m&k?a34v|AfN#N{q$#q+#(XjN`Li;eSD% zTkfBQ+T+pV=ucJnH{*=I>!D*Z{ z`9H%ieEu27=X@LOc`Oe9t-{|Lhd)cnO^)L~Qqkwf(I2DeUx&lG_4#v0yTI@``j-@+ zU2%B7YL~1yJV5#Mifi}3D!C`19B zAc8a3!87DWO@C-_BeVo2LG|oPED?L}m@nKxx zr#7y>mW}otiKBPMhEZ|#y;#u?h?9H2!e11pr^DxtIR2T6et%p$h7|r+1VMLwuT%Kf z;`q;2__R3s_Z0qLarG^R-T65^PXCayTjx0Z6AFJ!9R4ze|16IGk2ZTxbzJ+FD*Vbg zdd7WzLUH(UHrn%D9G?kF?zA|+dO*=X7-xqJ6jx*x) zbmGjRIQ}0hJ_Fi1P z%036<@c9bgKaS6_3jcPTA3E_zkF)=6N}n&{@ck72wK%!=DE!7a`}a}!7vk(wrSNTW ze2P?k$H(C-mH(d<=g+l@&oy!W=Je~nar)S^7>_44j(?JpI~Dxg?RA;rpBQHcCl0KN zi!%V_b5~q_`z!n-arkEx{y%Z;m8kT|h|}|+qBl^lo}SL075lRVvsmHZL_445In}e` zwj#k)Dg0VoS+BI-*@EG?&d*Bt1No$1n=Nop`L^e0CxB1we47vFJ^Wm)_!r)8#{Ug770Kk=T0vHIKxe$*%X+-#wIRJE^#`cm!+MGv;}*$R562H=x}qH#<(x5>?;i1-4wLzQe(vc1Tfp$No1cK5436j-4+;y~%$?9Db|f@3|wE zzX$XFiqMpz^UmX3f1szJVCu}-vkEFglgdK{1)hSDa|*^3Pc5wo6_*d6F{z@WxWZF# z-MFhqXQUSt6wRAAX>#eDjD8sf1#@PW78H@7AarwCaY5;FkOEkQWc{r}U~AIH|0xc-EBM{L71G3Q>hi=ZkFCC8aZp|A=w_*(lD^(Bz_mJn+8) zh?%pe6x-Ye!cL`AXXTE$d@Rj58khbhr4UGQ1ykqED<~^2ub4e+(u~s3%>{GP6-~y) zBq}H=Ew2a_%$gl4m|Y?&Qpd5OvKhZuN!xN`C+CeT7?)l!E~9`#2NsMSgtkTPEBd2( z%g~sk3Z@jpTC;CvKCLa;A=N-I(eE+O9Z)o*cv5*Ts@YGfiK>Psg;>jRR|DDKHdSfK zAGa*!2<`u$E7;man=W8CoiO@8w2zcVE8DFQZDrPio&b+z>%s`pW~X13+Qo)vNW0CX 
[GIT binary patch: base85-encoded data omitted]
zor}-=ao@StZSFf&{ezGD+`psB0lo{c7U$-@x|vrnzsFNVJmaTc3_0dG2jA#n{y1pI zS19KRjDJ`ca9;*K)UUap$NHIrTtBG>JyK6M6lnlGQqO*%H-a9jC;dyUpMbuUe!Yl#==Y@4Lf(@m-=#>H z@xI|JZ=a9lu3-KLQ^X&s?-RP*_Ie%Tw;{gv0~7zJ9z9Tu!?lXE)}<7!REeK@RsL>qdFk zgHMlJ!TZg5_WL6B8<|`4|l^FqyZ0^!$nQbMBvZE%W;q(y9JzJ|@qV zE>(WS*J59=63UescKOYGJmmd2JvCGB`>FDyW3vWM6Q4mp_uODQ*sbIDshDma(s>h1 zeg(mFznEVI(p3+cCYHgW-A@G51?P`?Zb31aUzA7BkIm3_+kB8rC z`0`?QY$HZzJm_RQ;ru-CMT)qDc0t(gBY2sBlg~G~Pu@@9bI)3;>=ydIBl%d~$>7Vt zzHH^VcBf?7_6@5yU1`DFUzLu}H@OeqYm|SUP8YKNr9BKQHqe_o&xx&-D&0<`6G_uV zPs}UaZ-=B?&xcyUx%O+>HvU8uj<&#m&o$9>QCQvaA-RnBs$-h;HKt4fbM$oB)*DN%3kIp7QG zAFN}$eyQ7B%sG`E-=ZG#r->faV?drt7y0~5`9Juo#Weg4P4MB2T$k*{c`NogJj(p1 zLQdN_!J?>xayUQd{*DuIt>hrqUT>v6IX~yVmTJBAa;?t^-`{I=sB+xF{4ba${OG^j z7hln#KkNY?_rpsh-(BtVQIEsm^LwU=1N4W3;M4Y`7vO!%wQj6;R^nBKMAgTFp9j-% ze(t@*_`^T1w9}o?ADE8w^ZLuCiD|I2``mmx-3j~81f*MwbXM%Nw)YL8=UT3}mAi1~ z00tbiJLl(R;JXjk#_qxQe3<`!%E5k}MJTs>MKFJ(chHAIzhzMK_%4{gavwIBQ_BVR z!kGX%_j`l5F#2TBxxZT^|6r2@y#jgz=#lfJLzD-4Jw9-L z&i(LO7+Twou&&uuxXKGpqv@Hq_gm&4=fSDc?WAYRRb_A7nW_W}NOuJS1Tm_g~=1q)q`E2f?B z{U6F34?gbum(70WN64GG!Fd5qU>>H-#oIirFFxP2uwPy1e3#`d0iFBH1=kxR>iG`D zCn8;BJ?{iP0D5FSzXEz0=qP5;Kbg-V(79jU2%8>~598JIO~g;L<3rxRV|+hkkFO1=Z;cnigleuwJ$Q6< z-^6(4>4zS>$T+jooz5?6%{%NN=}&lnBUT-6@5z)4dT9Ar?!`FMKo7Myb;!q=0s74x z-#QhtJo>%ako6?;-GP()`@K!R>Ez>$ZTL7yzCAeWuQBp|-LbsG;N!l5FHzo;9n0&2 zci|rm6QZkhMB!7urjd9Z#pdxKAMaL;D~p^Ed*$3&gS3^7eKZ4!}I);zn zL4Mv3Hb>Ay>~vzky1oOy0-gH0O-{^bG0rs5L;Z^R&`uekp9WO?5$$*_=w&B}--&qc zTUf&MlLN;+H+~Cr>jo1)@A&wmOpo|?nZ5+^GuJxL<5DK(=+uvX#r+UhGyc}_^a-Gi zLOl0J+{^y*e#GNBZ+X;3`230Mwz>l*-(v7_|HPRvhCA+8Y(MByGh-Vt$>UNn-;bo7 zc-Y^0_l?-Ep*gFyHPp{2cRbD}&~ITv^^E^Kj#79Rmhwr|M{BF2_o zBg!1OK!nE!;`HOM3w`plXfYw-o@h}dw?>Qgjw#!s#Gf4UShOf}N?cbvKXoAX1vdk4 z;5S;7OZk*XJS0hZQvH4`<(<)DO_Y2$O8hYj*?b&D!p~6>NneeYe~%WMJn|17s_cS zh+{GG$sVG%v#jbV-t5fSD&8@@-c>et760jq%j;w1njT_%EC~&!zJ;&ZZt`CP#TKtTW{HFN?I(_T2kspr z9`7#S9V(7?m)nPkue;0jL&c^Z@}>Ub&pjmc`CAXUFlATU zFP8fCUUJJ|@m()@*ATI-x7>c3c&E2~aDe!}xBTq@@o*n`+aR&OkK7BD`^W>9c;pl+ za^w`){>?aw`6iC#X+D*D*PX`v-ak#1ys!MZpV-!4ZW<&S`m;Pg^k;eAA0XEZ5;qK# z-&^9Hf$~X9+-}KdA;E&kgM(!2An|m(+zPYA%ZCPw_Xo?L2aAUi_VcahR=V{AQSzI`;;AV4`C_pzNhk3 zNbK(Q<0$Q$F;#pUC%2z1cAP2?Ocj4R zjS9Zj7h*o>D>s}gj`o%N=84LF@|gwVu72{fbH#7_$)@?DzMtGVUo`fU|4I`-^^+UZ z#Et#s-i2a!fBEKl;%@`wljn%d1LebsV$VQ%f1-F5)t@L14U~TwBX(P|`Al)Zl3$!D z{%Xl*&J@QidEgB3)F656Owl|@-Y`n6i`Q~h~Yp}d&w0L>2^QBSZ zAA{v{qr_7Q@_{j8KRjiO_%gxy(rEGN5Lr21R1B5hj1x6OqhC5*+%-&oI!in~O#WpS zzIt-sFiYIyL*btC$?X%wyS)6^Cm%jXH2dU}6U6U_%Ns|EXNJqCM~Ht6mup9gjU%YX zyCcBZI6`h5C2B{Kr4hfQ#BQ$H{C$*MKU&m|rWQYrmOI9X`Z4mkM6qqGeC~Ad&RDrQ zQG7p^Hh3?Qwf1z9Y)TSkqo~WLKAR-pnk=?YmIo$_=EDv3J%ll`D*6H%W8RDZEa_>y>z)bnZ zOwl-#=H4?)Hq8=cv*pG);-T5{zEo@-EVrkMCsO5Ssp3DW^6oj}=p1?bTyg(gHm`=c za?3g5hq-d=Iilely4?@w(BU4OM_2oDp8WH6#BLlm`}yzb=wDEf%$l**m%=gW!< z#3$#=n=ZhT2z!S&FJSL*_l0uJ#bWP;^4LY9`9iteQuadp2#{@IBcv#fH8awp>Nbn^0PC)tpu(Xr3<0Olh< zx#gMxVvUr4>B|vVh{rtgu6Xg3M;;g?KJ>`f2Z@?a^3^y|+etngCk}Oz-}M)VJIN>e zMGxl z7q4}d8wQDYy2>{#4y-R)qP{2Jd)OZ*H}(~OJ&mJVSwC6ZU%b^%ZtpL??I$1UFW&Dj zf9@~t9+)k}-IjdS5?@;KjzMDUAo=zn@$DdaF9w`=c_?1oI9NV5n1$?0mpxCe#vKbT zK( zJC-Sv<>O?3lK{W zCleuaTyJ#Y@gKDP*fBwF>Lg~!%1-!D99Jv&bB(XTBc6B2icaEyW6Eb9QR9^V_J~^N z09?Q7oPgL(E_p*I%)O>O-bwt+B{z2x-?;|h`abs@#C}QRtd|m(zg534Nk5WSMNv$3 z6vez00s8-{~RG zE_J=f!?bUz>qa(5l<;!&lF-5_w@1Scn;iFf#LI5^P?UHFzaH_vy8g~BpY@1mBw1f% zMShF!>sUT?v5lxT>OufY(}V0kiH-lS&S$e~i@@DOv@;<(=<_PXW6nCIiy zBR){qKd2o3Aj$eNbNEi>fH^%_)~Ous!*!H+SLLvkIXoRLZ&5j1HbuT0%`*HaN_^y8 z`en4JaLZ#-RO2@q?!oJ4-STFS_})#<8<@dvDR1+LS0q{9QrCaMwG{V8$xR;d73W*! 
z(enLh8mZnnQoh$!Bqu!IRg}sHW5ou?t7Ly%KPo?PAo>;-YG<^>5pslnTY+tBX5Wmw{X!1miydzfpL0!Kb zL;e=@rLm&Cv%E7_+|rrXw|6G}wayZ$-t8A9tch$}FibZmp8=w8j3Mp=LNOZ|} ztLsM`Z#aH*i<{A6Kyt~yyTw*@{gO+9{;r~YsILF+q6oC7kKN*Dmpteem2UZon-wj@ zGj0;z<0YyruAjWoA%@6eaf#gMV77Qml|P7!|CPk*5p?*ELp#yNL)9P9bG3bA9TtuUE+Nw`My<@H7?R0b;%7CB84Z(It<0;8Aj8XYm1U=y3n(mIpeChu!ig51Z4L&H_z!U1xE>lpl8zuS?!k zZ=qW~5lu1ZDj){k0|adGP}p7%6Rqz=F}Q(6qOUqp*qv~hXz_duGx;1hvz@8a?T)`1 zJJm_?m{a}(y@XR9iDtpqdBpuL`BSub-6gkpSVw4AccHZCF)$18h$C*f-6L+5@}E(7 zrURAGtNkrn+#gLbFGW+#VQPxW0c8Ef!<3bsDC@CKO!;T%g?su=l=UEPtfR%-F_Z

STCxwqgN0PivV#-n`QGGj|=(yKNaXx+K5r=%-%>nJ8OB{8`k6mJ&Q{L*r zyPe4RJ~)(1)Wf-4;tLnax4X&C3{vSC=dq?QmY=yq7J2gJ0hd@Q|L79eN-WOY=#bC4 z#J#xxc8NU>4kCLUa;pm?uH51hA39`>OAmOTsS)NYCnIlX#>gN)Q@$j{74mT@ise>` zmk&{bH4e6j^$vNPTWoSL^IAxB;|Zp`*ClRp$_-H*k%f59Dc_aiFHZSAw1srHc-AEk zyT!{cI$jg=Kph~mSu#===!=sm(9^0wkE;SfTNDT*Bnq@q6{y-JKBqr?tcvov8a{6^ z1&SXnk?ew}9rt4}b;^&OVy{#F;1u6b-#gc{E~$0C>_f7=Ddy`!tR==|qmMZH6= z>4kw+{=1v_%psref?=C0E|_S3)&rks%fI#z^-lS4PjSpCKkOl{cgd#iaK7%~e9a}R zx{3GDLVB^;fwIXh_r{|4AByY0qqX!B8>FJk=^ML=ze%~hySOPz;(BA0{KP9Bk9xv^ zKr>yj93y2n&RIeDz#|WK6BSt9@rvJdlHYd|A7Y;B6>DOsXC2mfyy7D?7q6)7EcbfF z?>ozzy?BTq|JGI9*hL=cDW2(ai4a@6l7JgoytL@$XyoAC|SI(pNDUpR2r6@z);cQN9E zgb!lGoeqiL#~f2XjS>HL$Q?1_d&fXr-|tKZeWOcObjG}7YGr4x1EC+d*DZ1VnEN`! zqQm$sMl{Nl)=uJ~D5m{g)IePSIjRR@e~MyGYiOB$(Gnuxiz6ur_s#zE)Rum z^T@x)aOQz)%sfE<+#@08UyA;ny2i>G=+&Jl=WeVxbrw51k>?%t`*9~`^*{`=LLYQP zXYoX5c|i3+9j=jFc*)3Gv}A|e?Lz4#uHi^`xaqEca?^$I!b~;2?^P!wKX9^H!FT@Q z5>L=+G0uQ|%tgM(-EzGY|FrvRi?_#<<_mk|DbLPyM znKNf*&di+)IRY-Gx~mJ6|H9JGCzkuvMmO1?$*sxpYM~Tos+*-)!EEl4^n#71 z6>7N@bEU%h4%946b=nK9>C2LhIb;r>utm!4GU*G6yMPgZ5 zLkGB}>P!p)I!|AF*?fO^kHm7=Hy$2T*J4Mkv?RD^Z1DQZ9<2cGt(H2z+JdJXiRHvv z$A<#IzX~J2BOb*>#PPlDB*Fi6ECJ-)6V_cJfcdNvpF208&?_)nDNd)CD=|x{Ta{R; zItcL@R2)W;fLfu)%2@up^!I%uMU~ni#o6k7uK2hFT_x3a$=NMpwy~ts9AY3>;A8n% zr9LD6EWJ+LAUUk!EtY2`WAqFGYdT>33H)3zfN^TEPb~%eevW^zA>eR>VuW5ug6i+X zieDp#*+UQW4HGBB1(h%xYM}36F|z)!cTMf}XLiHZ+L;Pcj@9@!iGzYjze9UL^>3av zW~ujlVy}0dM}6XUAN_ArwlUi}UytjDo!t8k-BF)WoS)7M;95}5Gf<(}&GV^$`@}Z= z`x7JP_^BG9>@eB*+OHm0;!eewFEMY_ow@(sBT)CU`6EciQbH+VTC{?acaNiH`Z)B% z2Cuz(4t29n?Zouhr>6MDkGj{Po;i>1t&Y>3j`bdAy9l@Q9VDn>X%8X1ov$zO(N^_` zGdX7{qFLlpsUDYjaTDQJwHK*(4Gp(TKU(Yu{829L^dYd72u$m2-m&4#eGz}KdO&L> zSidkU5#mEhLm2aF7z%2>y=7aY3odkg#p-&;SA)dYVpGNu4>{^WhZYB|uC!Vpy&edk zt=33RBe6n(>4N7_=>@cD4>dva4jZ1-uctt#PavdO4>d_cPFCkjys(B;(A$#;IG-i4Xghg~e@3;UL8Qn1v^aC$V{!gnQd=lmKAvy$LJ1pt@083sxrMAf8&CEaoKA z_JA1=1R;sQ=aNWaZb-c!5}QJ5dq_+VtCy1nmWasZ&9DM7EDM+M+{T}%gX}KU^u5!T zN^v@Um{vn0Y0Zzd7a~xop{SmPehy|>4c>?8BBrU1y4=r^67yq>ln-IbO7X+85r~jP z05Ju|vIWbK)r!lIO-j70)Ksk$cU|$HN^1FY)Kg)+G}CN$7+1TiN5jyM%^nMhi(&e~ z4901;DoJc{6iySp=QO(@k&7YJ=PkZwGm^!}KD8Qqh`wguVs-9UA2fmupxN?9;wMZn z8bMb#L-jqTnD#~03=6JB*kMc&4+S`z#8L@!P*{C1zq~I&y__PpWBHaU=41JmB4E+M z9Q0qXr0O<$GEqFpmfD)Au1lf`^@b$27ba;dLh6IC*p69LSS$~#Z^L3)vU)=AJYLyI zJkm&g45L7bx;N!F-IXpbPf)j~!I~8KJXL&^ z!1c;BNJ)yA6QuvfV4o!^Vq>D(m?BLTX`(SQaAw9U%p3>q828!KQ@VUkZ_*ZLle&iqC1cUlJzD9bvMt zPW!in0|3rUCb9X+3WTrF{)dv)yD8%NWHR-d_J6O}NeddORapHrQnzM^i&8-HGQBT; zQyRP*)6`|@+#+79x5f9KjC--D2Vu^>3=@BcBlU8JlQCGemr(0I42y*$KBgBH{Kpbd zul11a4nW#}sJk5B>FP>8x1-uI-PIGVyJY5?wuppSjBHZYx?;IKccIsvIpw@`%sytNC$#|IB9B)qLZWht^9~bVey<#y%7=@`hA}yiTnHkK)2!SMhLSE z-)CWQxl)LEUp2??j{yaoISDy0HWF(RXxD@d8!?N6rvZ91MxSJwexOUICaDWj#4Snc z6DTsS3}?WU84?>p>h7?Z8&>xq+pt3B3zF46$zp3VS-Z25+R{jT(MW+j8@f|EW(;Gv zSv5}!#+_1~2djkM$@+ygf>{$HFl&N%m;?_o!4yZ##z5nU8>RYD+Y1qcWI`;2T|;uE z0N|fVuGOmTQw}jbDft2X$3Y9$gXLlI zT_UsIMCtehW^l}RLh6dJ_$ovmH-y!?WbsLuJT8JxNoIT1uXO=vSW$QBtmZhFfubi4 zQ+Q1X$^~B317guN@OHby?NO|%w`h_0qxhM5T=MqfT<$b=lcY|-b_z6@nRM z2e%$uG**XUr*b-XD$m76B(qp0*wR0~K6Q|$>{o1U(wIoK5hrJwm@c-l>rVm5!h%f*&^tA9z+ z_dJQbp}4V*eo>;2xf}p|C$#fg`IJLLDU1$_*#)uq*9>Ovtu{+>3VnYg+%cIcMmT}EQI%IJK@yq z?v{*lI;tldi+yfLf3dN6Cmk8h&Uiq=ua6CF(+9A%I%;~V z_{iyiN$lM|wICHHw*&7^;r`#W6tTpg3;)ypzJM;kMrNwGQYobRMxX5aRdHrL3;T>I z0{e{kT@_I8VTObqSxiRs?`t|{n}&XbS$V4XlG_*y5;$wd)EvJrBrwCPu|1apBPcWc z9R5@RlPAc-iAtrE>47Q04o3m4~k_w2qKJ|s-3QO=z05YHJXSR!A z`bxkKJG0&FXNK6rMuuDb%n%b0WcV@m3j;KEAVVy_kl`{M9u45_FlL40q{!+4y$}3} zGDVLu@j>t=b1~Sn#k~n+8?@G86CxmfcGSb*!>6#x4I4ZB*#5msi8Ve#x4=G=z!R59 
zh4B+t8(;@q2yA*|lkEa*$|Q(|dan*McqH12b!mdQCqTrPDPvy+m>!vbC_Ag`!lHNi z;xKK|mnY-15q(!-m%~w;aNg8u4gV&m2cQeERf^#c2N1$E4*|*p2*B}wBmrK66SCNf z4)nbSn}`YOg|N6ILBan_f)CKCK?PJ817HBSMEfDrh<{r9x9E5rAOOyXI{tg@hkPUE z)1nK=jcKf(htpU;o6}f7@NaT%aR8nRC6FerS4CSJiI0_9 z4>b}{@ZS+&Ji7eaG@JtHi=K8Tj#;GPNM&pIvCRSKbd)+xToWX88xB~ciI0MWUYxi< zGI5eZK5r(eb*XrLhJKu&Mf}nbJMJwZh40Vbt^N07?>$X?6Jo}{V&4dgB&=Rd<9Q~e zLMH}3_SNybBAKaf)BZKdOtmSQsa{N0x21?5bo^{=CBeebi1BZ*6Jte>i0vtAcB)vE zs#d4*MCXNRl%#HRU_)_u{^&vwI$cwN~Ap*93AGhQY%_QjWm^fp) zGu6lG&h1$0VAIpVq!CFTa~S=R!)VO+5RD|~J7c)-T&W&Qz`==G31X^Lw*`4(YDG}2 z#3?fL2M|@_c9>BT*_X}_(KvQ*l6V5@JVZve;&2Ph+!%Bm^(gkI9kmv-OCN1f=xJ9a za`8I{`&IhH4OT7>CerrvLr^?L1%XrCk0x;!5NF0=0eVMoM8A=MhF7;>)~VE0Nn(vs z4<_-XH(1!N$ifaq7Is2J&^;g)iDFKGEG)$7lSG~aN9K>qTY8z zxx%udT3zaB+Xc+`A35A7!{#oQG?E71OXeCnM9ywJe?|EIP8 z4POQ@ahLO0#1xxXXguTf09Lgm8$v|{dP8Ek=Iosh@_ z`Z&RW6Fln^#p`U6c|k^CClYMGf#U@bG)`Jjs8(6(IVJkP>qniz_6`3ltTQw`Oy`8- zG;yM6|3q+QBKIoc*Y>p+64l3vgny6S*C)c*ecchp?(3Q`cVDqo#qR5?SanGCZkW5T z@MHHCP{=FnZ`gh1-mJU(3jb~Xa{g z=OOOC!Vk3s6zsl&7V&d4x zD!l)Y#PMPt)${Ekbtke4G4G3U6bR9nP3V=={;(#zh%HX37UA%mKI?!r-&H=Yg>m{} zK6~G4AAwl%A^LkCcNLc6K$(`O{juG)5nF87HS*D}zQD&hF|^KdM_uNl)L;_gBXBd8 zi9Yc!OzA+?PatZ3fwM)bPZPN>@jjXYsstzG97+q0MBJOm1BI)!LV-X)p`dp{p}dSe zYbEm7rC@>%t^v0viZ2$5av7t_hI^RLkG2 zVsg}#VbS%5&@UnJUWoonc{+GySZxS1_c!F}gT6u?qctJOvfY%hxDF=^G0|}PV=6HN zW&oIKeC^<0@9P5SdHz|g%zzo_o+ts z%$5}vaTq*dq<6Qh;%AuQw(4+E0F2T2N0tZ=68+&n5&y6~Vvm{jhL1_Sd4)bcE5Kco^#QDOeOOPt;;5C_=5+vEr90S03>67D zkKub>@emJ`JC622bz=sPq~|Mk!kbkd#&)GXNf65e`rKzg?ZkQ_L4ASaoC&0e1Lxo2 z0BcbFs1J$u;=CR28c0+l4p*Ni)z5mTTkwz(yjY^(_#LJg4ksgz>jM|qSVH~xgz*9E z@1@#c_%LJ}Mmi$nz$s2XJf(j>!%*ilHeZaGPw8}jKF5MH)XP#_f!1^IJs&mG!Phj9 zA@&Y`(i1fN!UhN%@UP?p3!x1I;m(olk!&Nz~F`8&ikNyJ7$+vNqQ+B;X@$B6B0o-b%Ui7me5Cir7qGCNi z5fGPPy$2khf^59+W2y`FMB#2fF>&|-FJ`+t*%#=8y)YBv1O$!;$%wV z{=Svks`f@x9{hZ-DR&39W${?!^LXz76VWV=6<20)O1msm{N$*cGdYX7s-?Kzr!K&I za@bpJ$-!xEE1t&O)IxlOX>SYeHh$Th#?nukimPzmI9FUAP&2bRM?@~~20H)PSlpVR z=6B>*IrbHq+;?&WcF<-De1V_0o$LQ^NwUY=Zp-<`?o>O3s-)ywV0 z&5hKGJnehz}t9j#p7vw zh49~LYExTrW4hV|p7GmW{F<(ocN8~esMQ_$>emN(VoGDRBS+laSbd9J8mmot;=v|p zm=Bt$RUO3qrV3TQt|=?y+2(3>3$d^TtL>E*tmdhitmX$YS-~G=vVu3XRMWG>CoS15 zx8qH$Eb(qDHqL`tYGrFNv$eXlwb;>GEovjKZ^P<&zm2*mTb$2V@b1l4%W}lj9M;#< zIjpHExooGKbJf$i;+tHy(7kxcs=b)nUaf2|erT_j=Ha7KY^@y~)S`}B>1)0JtA6-> zX#G?gm*30eGvz+xiZ+mCJR9dDncgSI_4Wyh~k|g-p6MH*^^4O&#~dz_T}})Hk@GH zX4YNOs!03X_zjsR?7s89)fN<2gy^Tab$|KQlxLX5N4@IuxqN(K!=qai*%bdb&d6F* z{`lOEvF$R(x|6Is)4GeSyTZC_th?E|uUL1xb-%N2AkUPqsde+L+snFxtb2@g$5?lg zb!S?4k#$#Cca3#7TlW>~Zny4t)(v#9`CB*7y1lGB$hyZ^cZ_u>S$C#&7g=|Ob=O#T zvvpsw?sn^bXWc+Yo4u$I1ch(Jb zviVy#&$_*=JIK1nSa*zdCs}u~0hce8b0vF>*3erMglUN(R0=2^Fwbq87Z z80(I)?j-BZwC*D7uCVSJ>u$F0E7sj^-S4a`{@}CW8l#UsS!~>ot?QB*cdd!P+`7+M zH+6}LKgGItSa+v&iYVrU@_)5;HO!*iy!Y!`3?AmzX8AMH{i>E10K&$ ze3a}EjLQ!Gg}t?GYv&>zI>39WnvO9)6>uDJ zeu|5qU09?6WvvZ--bfR^)C5KM<4ic$hEK8K88-Zd4IkvU5pj;;Z=WmO{EAL4(ioqC zgZdtW-)fnQ-+e-nMmwWEWW@Eszaz45bn)-7_^bCe3F7&UCu8zU&6T$Ps;(~5C@|90 z`xyL}YFzx(dkuc5UToY6jd({qh08C38YA1{@38o(*6tFv;cOc=*f!GRoP`OPBToC$ zrPtl!zuvFqQ7<*o1oZW!~HFO zs$<~GExiqX6JBW>s4CUMm8I9zgn7b`&%BlY) zVRS#AO+Urj*<5+3vi3Gt9{AmOJ`Kr37r-2G53KkGW1hu-wt)l&eFCpr{*E(!(@ao*x0&AW-SCkH z-?8cW9cDhmY}lejoD1P|M9FAn>yO_F=Cd-O9sRvs`dXMEcH9g+Y{UGnEuW5VIKIDH zxZ8jDJz75eCT)FB7ewMz=tC|)!rFn|^zmtiX#5jX1D1b&x0O$8sU7{@SNdFj`Mp;@ z4T!V;-ZM^qjCASoyQ_TA!Rmbsztfky`21ceA8dWq`xyLLU%B`jPBr+i*NctIPY~~j z0}2x?$4jiedxq5y{B9*5S04DCN1Z+5-e|}$) zkIOH=r^v@Gj}CZ#hQRNL%&|dNo-BSuo)($(N7yvkcE7<5dq`=op?CE%mma@M$fw-O zKflMv=RN(4{fH|~IHLb-yz4v#k0Uzbo)RO${0<+VQ|&$S*-x493py4*Pn+;y8~&~F 
zmfs!Yv&G``yI_1iwPAjDi_bi1BKUnRKD}(Xq48>i#;W^c>Sw^VL|YzypNY@CF8vox z*sTwKPl%6OUpnCVG4v{NrMRh|DK8uR&+L4X-`(Nk(&Kk__%uYX3t$+VXSno=-Zc0N zbgH^PtQ56T|0x8|L?5_)N25e)omX z5z<8P`!0MAvf<}i#)jkPYrob0@%o(KJKGL}#d~myre)xS7J}#W!9pS@okMQC5 zJoucafAQ19g!#=4KKzabAGiMby$n8X`|$f0eBQMRd+Pub#%r(oS#qEWueD)*Z-S3I zUhq2-d~$u-(cg=pZxY?-}rM^W%34 z_zbY)rw(|2euCc-xjRBuKKUI0KGQ8ezZbyA(Z85K+i;o<$G1meGe8T!S`m-VGjq#cOoAjldAMbVNb9W3@G+^*g=#*^v<2~+t zT=|P)8lS;$d#;Pmd(8O+ZT%Rm=jr{o&^bF4Z_ z$B(WkpHNm@RZ?A3R!yg5d`)rrgz{REi%O0ISC^eOvAnu$wAfr1HhSXtDsd({YFwHz z)n#QkuTdx7W>s2UR#ICgmL=9f&`eZ>4 zWKtRKaE~WbTvc5@p|*HZS#?c$ft}uB|R9udUJ12iW+Mii*n8lG@5@6PtZNVPWx6 z#if-KKw@HPt&ZL60MY8`-ABI;i=h7*?#igFDyt?Zo&OZyqI4vhw6bb)b@|wFwZ&+i z;?jzenwr8wcuBYnrLs^lVm#igD=D8KhGDd+Ikl{UHHPAf3O8I{TQKspluxLdSX)e45?2$Bx~{4!8x0AF#J__0h*aq2nvGva&>HnGs58C+1{SsSK zCzMnaSBoL&Db-aK@eo|7<&77Za%wcl9x@SUM}y@PMvH~fU|DrF-tded17GtpM)N{T zmmgjE8()-{ipmLNiz_EoOa?Q!6eYr=j;|I8VB5oA1RF=y>Hu_3oI5+(M)n2FLaz<_ z%Z)tcV4QcGtQD4c7BeI2FL6jUBPrqHIPI;NR8k?zjThoACTlOGpt?r0wNv*Un%*{| z-<*yK*W}5?V=5-rj4K{Bam<*qYH^c}Lkkr%hY6M9nMeer=Br4ca$>D$TFCSo7b-y% zMuN3eLnrHCO_b97NCYTtj08yO`$*E!WuqpJ6)pECWu`UZ!*y6I)$_C$@fBr|Y|Hgp z9a&XbWg{Tf)UO}t=!&v4fDUP`oGe-?Z_iB@^7Jo(GUbagC0eTw7Kn)_G#sT~JH?v8@XvGXl6Nuh5azV6vpT zRy;kBC}3M89&UU{*Ft+ybDgw@wf3xmns$0e2XxIh8>X`uV+zjtpn#G91gQwgoats?a;-N`@xdYBVy-s!* zU&tVJEC(kCJ-rk|Q02tx(lRgS=hPD`I{zW;Gk}e(#+TKOs~lZCs&e$?m<$>MiKTxs zUB}70Z`iAESbYfcw!4Py`i9k-v+A^#kFrnP=)CL@HRTf*NtkOAVrk zqduquE5($8OEELO9W!x4X=L@X5rZ8*e}K)bZ6^qE?qEGGP4G^kr(^u3uBg&8Z7fb} z$BAW!{N?d0co^DDFFT7dcW@`{6H3OHiE}Vtz#7EM1tqJjWW4z0NDK0$;dm`h)8*i% z+C$)XZjq@2cfJreTM*o_@y-?MnJLgn8jBJ$Phg(xs5?g*%?(O6Gj); zLILS-i%zK2JXr~@!z7#>x(BJ2WmdJDcB7%FsKOT$ultGKv8i7iu%R54TC^J94=WN zZWgAY72g_QD)A65;tC5(MvpGWU{%aDT`{bHlTq9O`vHktm4SSmYf2QdxDo@8Lk5QEqu}QoICPzJ!{LfqikKso4WSBJn zgL#Ddv=gS<$Nr-kjqUR3PsOb9;w&#Tb^BEUt9;~M*Bn&`D^j31%=U-I)AL}Kol)i827MhS@A5Hvvn#sg zUs5!eND7p;?&t88+?MqIj|{ALGrL@_q#V&6^SPk--D7U6-AKy~HskI^aCwp8$L@2)ZgYqSb_wNM9V|;Et?72h zlvGnKi1qb@53j5qUs7>257h9%UKUEocQ4iq!5a(%>amNT6K5$G$*708I4~X;BYOTW ztbI_0aVZN6<03?1edhXjB$fs$5~bmrA-p^3=upf5UJ5P+!NhnIHtU&94G1u3n$UTU742wuS+7&=t!5#c=|{9h=-vDT-Jm3zc^ zj~M^gi}4*WLugGOF`w@qy}xHo=e~w$YvM1jlC%HYPiX8O!D2PUob73Y_jsb#n5cCl z<{)S^S_JS0%^$&EiZ@&8$2XQ5T4B!5pcRHUfSifLp80#0n1A~@wcRI7dzP5GN5$Jo znvB^qFzlJ%{1?t|w4LOq@;%bKM|%GYr8jyOqc?7L*fTT#dr!OXnMFhw6}x4D!dXPD zouojdiT?{VajY0WHL3npATQD|>%e2NuGIPj?`Q3%mu7aC>0r+iC$hwGuVy;o zB=I9|c&Kk-A8pdH=l&K(%9K6YZI5>QFVt?BXPQ^zo|)FLjMv{g#P@R>PU3m_I%;#n zr43JB^vBa&dvU{Q8eomaxJ-w}I9_Kvtq;ECxi4-Es4kgMGX@9m`V<$BoiMSu^o%ob zIUHW1=21MpOs{WiGdINTE}hE*IRCe~9bGwu=i(0#t$Y9P6nO+LtV59#aF5xD+VTp# z>swo?<3uX1Ad5ulnxSxrMv~IO%ZV=>F2ph%DdZVWp4fCRE-0@#x^h6-uxk8Omf{Uq zTogG9Ckow>qfZ}k4}$Tj%zYZ-AbTd;OR&1Es-grpNa=gsUbTmvy$G?i#4?;QjE#!E z!EBfm?qOpumB_hsi?hxi2;LPAYeXioMtHq)=BGFji_Xo=-P#yeQd4a1)~OMj>w(!$ zRHM%U*N7eUpp2@ltSCbd6wRRo;%Vd6(;Csg9)O#QSG0|_7axj$)PqP0F-QyRL9+M0 zY^VoRX09A@OOOt&<7Ki0HyTdHYvtuU)H`cez(;lk{3Z^FB1MYD*Dv(-YWi-o!SiAn zz%3%UyGo0qm=XuWgcFH*fddG*^mupKDf4k3ab&4ZlH*Z z$g2~xm-`0sC}vrmKVh_?JRX;k;7*(##kGAVRhEyAPiHl1B{;+lG<)f(m{`0Zwvdq) z?srKnAg1#ihEJ+^+r|{#Tkd7`W7wBNY%Z*zo;cGRYhH;he*>i`j@*_Ji|$s9KB-ne zD0*Zk4j!|eb@^qMufMz=umAqjL$BO@^y{#Q`%e?Diftm4x)|5dK#__g;y}D;62d*n zbgdI`j~6TEDBTlaMySC_dVMY1(UYsnM8Czc;8MK(Ra;z3&`p1iM25;M zH^;&n9&pz^6N~A2?My6vu3^|5`>LR;TfFHO;Z>NZ+?x5D!4r4C+#smutFa(&KNcsw z5eqCGSB9H=a08WAlH!{8W5F?LU*8pwlfaktw3y$d*!w8-BReg2OXj7;H(c(BgL`@} z!GkeB-o*@?K6%hF$BK zgGVsEB;N&M!!PQ@&>Vpm&g|P^4VClxdQrUj9f+6rB1F!yK5;f))2kb+8x}u~;KxC( z7$yY2ln{Lbje~5Rwa#?|Vo{>#A{T8H4Hgzg{bCauC>o3bU5ZL?0CWc`&kH&y8W10$ 
zDxyK0=7|PaE`dcZ3dXOL6oN{>B2NXlwKBE7NF)=#>Lm}qSh5vYXLrJyrL=flSqUb! zoJ+d5q1KHPzr@VH@m`fXZ)Lg~GvyeLsdu#N$Kl{^s7d15U17wm1*&7F$)Lpvzs-}g z*iQkk|Ayi^*A|QY7V3LS-PqdTKb+zwq^MhMXuP_S;>ACJ`_mu5ZI2bi zUmTByU=^Vk2X$t+c5wdP_g>XM34d=lPV(!ph)MWWmwQFf^JaL=O~~7?@M3ud)!eUZ z*UX>BOxwL^E<()w+lxT4;@!*oBN*3XvfW_6&^Q0$0(jhOuP|M$9}gW26k|F_pJfdS zwe2R6i<(QX|7}!HKM)TCzpmJNy1ec0A1(h4JeP=g^2WH5O{(YO>4ccsFKW|M7oz38 zaTGjrj)CHr`{Jj-yGMw3mrbD8Yw|MoPnN4iQY$5&NY zX&2)j;nzCw$rHUehN|{z?}(tsV2FJm-$j~pVsN9>LzrL$f=fq-r3NZAoKbP*UcjAf=)3vs~SN}j8seAB4dqazV z{RW;pT8pM1){lvyGX9GL;$0YKJvADwG?Dl%sc=?-3-6+}5XC5tK*gl*8lb{-B!T7; zoxj&w(iEp5K?8mSK@8Zr3$jtPPyZc<*1)COKjAZ^hJJn0!kG$a)&+^){y&$lHbKVL zZA`n%@UEzW8Cp{#Lrft)5!Mico+gP!5&NdbaS$O34@#EzhoxWL=BxxLx7Z zwcFlSFFKXwM6?4G=2%>>{&T%ZSVmd9!4>D$$6K_AnFuvrgXPv8>&3Ps zC+gd#{jEdu-|+tDo_lBc6|&zw(RVLVwZ?p7WfKR?*wgI)lh@J>n&nj;u2N`V$eT0! zZhSB^V-dujWpoNK5Q$;I;}#pf+3H=pFR35TGcB-b#VK}w8Km$HW)H;}Tfh+P zSJsnnohY#~2C*Ug!%Hz(kxc87>y91OI111q2-v435M z4ZK-X&;AJ8u^Ijy!7tno$2Rbf`|!MT`FKL?6{P%_{bM79*o@8W=;?xE%WKCSffsh- zV?+{KI6e{;6CWiupsM2|>mm06%3c?(F1f3Q{60LEYacDTZO=^>vp{`ykvHWr?iHlb zOl_2S?e`-7TtA|Dw`nAf4b-9SzWDUiu7KCYG(dz=PhGg2KK8pYKmFa9s5fz+uDF{X zOBNT@;=_h9EjW-zXv%PRYqW4SO8ooxBHr=W`^p6|@)BXnQ%NpY-~4XOlW?@4HAbXA!S^sD1HLqhhVo<0BmBy@4~PVnvpzux zHLn~boF+kd=hJJcsbC<8qXEMAxF+D-gd;V+t~Veip$3#!Ah^ZXH7G&XpDag_Mq3f= z49@)N5(mH$3upPMZH?_fz!KXmO-^>QP2Rnj6 ze{&n`9h^*P3mYs74o89fEp71N;I9m}vcX}&o0xBw4UP=5cKvPS!-#d-o)Y|&`R2&S zbZ}hoKq9xbC8`R3O=t%jtPP$WK(Ldn(gi#t=p$EqON@oW*LI2wRxxo`iIzrqnhfqu zHoM7Z5bbMwo(v9V@&fs`4$qdssSNj#XKMU;GPoBj>>!D?Iq)x+!5$0`vf(8%*qh`!Rg54c{SygBU(UGFxBUbuzeN0K$i9?tN`H$Y2}tJ5;`}^T!k+5S$EeA{#mo zWV43kx-w$khqOxjhX+5 z%o84E5{OsG54?oOND-Mgnp9FD5*|0O<|5%>RK;3=wmO|d+3-7ETq1Wdw6_TbL-`=- zbTty28rlbCb-I~Q^AP!Vx*N>wP$gJ$dKg&8P$_#TqUgi=My9U$qK`Kdj@mq<+m zaVea09XLIiTiS>C8388cLDE&%$8{+JE~uOyOn(Vrc_5MG2*^AIJfjtImz(&rOrBzK z&-bLccRE1VX)LCpJU1Rq^cTQ`94&RPo@Wy*j3#WNGb@!QwI%7=eBNh1r!lBmexDq-0JRq7LT(=g)TnFoR32C{sQ ze9qGN*t*{Xp;?WP>xk10yn_jLip(<+{)lDSD8I0{$95|M1m9Iaw8`)hX1JKCTHZ>g z&xZuaTq#enn4_$V7Hp+C%+Zy)ohh2Xt5dKB?vQMxDvNx9brV4>OSj>j1ouZ!w9bQi z8Ok)Bq#lmuS>*SuI~rJRP@ayuy{<+m`?UjszL&<{scD>=D8vwyTb^%3;0v2}O^+gE zAw*Xcs?{xHfG|Z53}YC68$SrMnr>$}jZ93Hq_vabp@f_8KLKiWDJGC|n$GnnhC32} znq*s(MiZ(=d7kFdnJ-#hM1H1A_L4^lW?zysbn`p&Q=708MuN>-eaFmZYUpjKkS4v^ zP$ulm@7QEJ6VI}=ySExc+Os5wk}%k9wUw;Q*5Ks`=Uhi>b2O{D90TM98a59HUvt_X z1lWZ-r<_d`p}D#W*abnZ?Hr`%SSXK?vYl?>5flm0b{PQ25-`>UjM0Gh>7XiaWYb?I zH`oflKfefRgm?`wBUo1wISqA`_YDZ0hX2PcZhMO>7k4-Z(V`<+a3k+^f!B1nj6l)4 zB8)aSB}^_9n~wB6E@U{)-X%yTI*hi1V*%m==S)WVJ%ui zbRSAfL0F5{5G`6g8P)<}@Xuyg3xo+TVOR@=2|t3cQErE8`r8riq($=xc`)S6nLkW4 z9!D-q*xmLuTrRQP-DO>>W-b4{G?7-R$V4;QOEcMzVa;SO&EzPA#oq6bsvy+^Y`3!1 zKCtc#Cevj5XtL`gWcz5cTO(xqXtKUE;A`Id>TWdzVNP((QNOYCYGJ`W^ zm8Ch;x~Qmbcd>U(m9jmBLB0-jxj@dd2<~cAW(d(8L>IE>_BaKY!-#l^2;UBW5Zz5r z=)0-1Y{B&;o;w7w zGSPR6oAzT;%(iKVxt(L>9EhI?pr}FlGHAHZ>76kN%x`WN;Dv}j9x>_b{l4@E{0s{J z^XQxD_xjcF0hpLvJ`_;_v)EQ#uQz+8m@#I(o^U*L1gy;>Lj)W5Vj$Rf-x4}aMqZ(v z<5z3vq&Kxwyi+@)QjtZl`RKXYDZ3vf3N|16sCLF}(@yzL?ffGZVj66IYDeuj3%}IPB`M8_a_IruS#*MSE;~;I?+JVRf+b-TqJJ$@?&b77LS#p(jmafyzb zH_D`E_rY)I11QE520$ql-W=YHA@AJq#pvH6JR=WKk$eyo^#$NdHX6zftA0%h0Dx7p zOr4Fcq(^-xT26mQqa%o|77wE!ZOO>6^$i_I~S17DX2trlUZHhX|l94e|s`b zXgla!*&Yma+&4^}Cc|R@b;3&_w*GLN{lG=Ie_yPx!!Ltkc6bUXY!Kly`Uo*ahW!KK zIN@%c;P}G>`oRf?kAR`GP{sa{@ zWr|;f+5})Ezm)8p8h%-`QvnPoyrBcx$z%(J_d#u^hJQvqXNQ;fvI&PEVc{iAxG+4Z zKdW>uDolnS>dEAj+A{eCP|rG-8X$K7d;X5=!inHVgbOrsBj^+-d$!k?CImB1H?? 
zo)OL5h)AOZ^M*G=-30TFYOS54rFKqiubr~~+8K9}cFJqvH0_okM4xOB&3pz`;qP-< zaw^xBa34dZKsqqQUIUy_sCwD*c=+TKT`|rN98`c6(Lo5NCmoL8j8y!JR%a1mGMEgH zkl;>(40q{ki6*k-8HAoDnQ?sxBI#sQh)gcT%5gqR+xjVD_5pW^K4%l8LyN6UUw;Im zMeC0VJpvsl@l~0Lp(Pd?1Qodh>Mv0^@1QU-&{nMp4Rr_f0dmXNO37goV;+^)YYug! zTfypFaMWlHc~>2B;k{@WNF;B-&q>gEvd!tFSW{aqlAe)=17S)Y!s$nxNWy803Cuxk z($(lr=_e7SyOXJ#4E7(9EwhPnpOhqB51J4;ZxXTq-KOJG3c1YSul^CqTyT~_Xk|-> zx?GJ++zt# z-W#w2IS1=uQ#*!etMLS`Wy1SeoyW3(D9;+kgr(G6n^?C#b5<}(5=*i;z9c#833~`b zP^arxqe^7PDu|oNd7IJSp|1u`9DuM@Ra##WP2~JQ1YJ94vPPw@S5vt;O~JFg5@NDf zl`gYZaYoPO>`hqveDs2z21c49=MYBkWPa6g`H7q|!p<#%2&@ZJcry5_e?;y~!uofF zq+|r|M2=b%GI9%nz#oyk6$p1xCA7VTs9I{KOha{PXM9?)cm zV^ROI-;wxbheX5>=2|ouzb2yN-ozNfjT7uQa9$i#0ZxX433ejfsTQOIQ_vJnLMO`m zUS^xBb8s#KofZ&kcA;`OA(q!d%{Dz%cZ>{jrGG>YTSJaVT{o>f8m!uO*9Z)Y&OuVj z;VfEC$OhVDI)xd@bm{UqwU!4!JemqU_{j|BB675dmqEic9jLt-1whoW3{ARAuyc^6 zl)VERC{V#bV+T@_5HOo>0kiq0j)g{S*@YApcivjg8w%QrJ8v!LYwr{YL(5CFcRF$P z`ayB$twq<%LB4AldWzn8%M4@Dhfy{&S&|;qa4NUnGSlst9ZcocTV{q0rlxZ1Ewix= zHc#c&TV@j*%ueOjTV_)m?3nr#_{wZ%gWXfP^_JP(279MYCbWeO7Nv6QEwiN!9-PXp zx6D>HI4qT0Z<$#(I5L%6Z<%f6oD}kWN-DSBGIQiS9UPZ>Ad%bJ5>=&g>n*c`4c4Yo zf-*bF5>$lK_KZ|+y=Cq#DLdTsNafaBW><(zx1K6(Jjv0uxVX5p7e|HY6U5k5& zVAjwcH*0@BHZOK%BOI(Q^wt z6#9L}v6e)4iOjne7l_nWEekebj@YV|sZCMv9jdEU)>%4X5M71+p%+)nAAzUl8KPhU zijyJ8A%v}9B_!1i`3o*6Vu&?|UAcnAo=~dT_ei$GoxRxPdP6aDZ2>nFGuIg*5Cz;& z%)H0QK(K%tika(8c~Zg1QApQ0$Xe>jri+w@vE;8O7r>F&459W%_AN%TN%LH=ZkK`9>Dv-|)uga7iuif$DGZ zx)+^GY&o(MqVLh|?~<=&{AZ}T7Wa9`WH1_xu^@R|kIQqIw6Wt+Po!=Nki(_AJQm|_ zi+}6#c~oE?*YENt%w}4YIz|VwQM8USKe`U`xW1QM?6p9tSQAkiCX+la{iQz{7<0Uw zQZ*Tsm{&xMe=(~KUJOQ~EQ8>5(gmW~KvrV_Z*j51AD?Dj5V#rdqcQ)Un_@QaRX!x)&sn${{AyyWl9One0PNsHk8#swDd`6FRtHF4`)42=#{3 zc38oDhK8EZ@PhjwMA=7}(2)g87#eOuM-{x)1EHf#=;(s`P`d09CN!cTAGMKvq6r;S zumBo0`y>-OwqPx(oMJ*F3$A8plnEVI@EU23HlgDS){^EJ6FQ-wD`}25p%V)(BFzaV zbdt!S8BF$oe#<_aSp|EwM(k{g8yo17T!hXyA%DTUAe1}Dgn|W+BW3OdCKN8{h#s1Ip$VlHT#9*8 z?pzaUT+kVvF83l6YF^L;dMo#06KYkks4GJAOenkHg}w+aq>gafb}UFrM(7eslGC=k z2$+*00dr17jco(PuM?`{(X$J>EkPjyEFb2UEL%|ZmiHlT?z&f40Xd%mE_d`q#Dm(K zu?^AsM+CIx&Yu$kYjQDUIrm`#WdUd`-^D? 
zMgVvioK8%hH;x#yh#~bouXFy_eKh&HUj2v1eWidoI_fu~)t(3Ac znSJ7D5VCV!dW|aA9H@VwMze}Js0TR1@F(QakSXtCbe`l?!cK7W5>YhS5fNFBIVPSl zV30G;ydH%1Cv>}`{Tci<^Iwa%FCqeabwwsBkA_h>5E*8@Vu=x&p)vE$An*tRKeOtM zF$UNsBt!$MQy` z$a>2I6nV5K%TLi0vnJab(OhZ=ytJCnxN%E)Lhm#mN2EnHYp9$ z@&~rY)cE?g;{Jl%j*BywnFmkZf0i>T0K zMx^>g4o&a!cIf=9Rdryr$IDNkYO`k6p-CgX{3r*oZEPwp%{*G}<$Ow1Z=V6iHfe@d z0Xekn%Wv_M^>vg4jufDRyqgFc+8eNrj#1vaY8j0V#LV-aCd$!7 z+1Js2Q~MevaC=*5C4Ub0D&z^o`68ZSjhDw=3^|GjUF=LXl8;q8+~AOBQ~J6)#nA>~ za)Xh>y$_kfLa+5Sil)P`$m6bvyp9NaSw+B(B=WdTB2O(ue8|S@M4pt~L6PU|4~!HU zUjRd!k|}S&D3jH=0SxZD$bV31nt3s_h#MN*l94~LFV}i{2$U)}Y2+#F!i!@7Io!69 zlQ1A>J!ja`Jd>oz;a-kxOu_%q10#aS;RcV~pB?5!56q2!Gy5H@;W zzyh`mkqnJ!?$5oVt(XKfzp5?wR%FSj8qw~S0w|X&I1R|)uW0KBmKOl^Ov zL98&-CyO}Vgo2{i4?z4$Bc5&$&meD@#teRA?Ia`Q*P9 z2%Uhn97%~hZu-d^p}^W-V%L_2$rjm+M+3Ic^|2vj23gWSBCi*L7en~km-~!fjEwYH z1QK{5rf}^idR4Lq)fX7!ZE}jYnX>8rm$sM~1JO;}C;JSirjWY0x}=42Mt6X} zGBud@7!!0Ef|%nRQ%8{{zeszN!0AN)2J2NCZ$#d|8FvbP+8^v`oD4>z0XgL5<)HL( zB2k8V+C!t53eDptr%c0ysr^y17zyq4h(s&~_dn%&sH^txS`jhT%MDTaK81R+tt36l z>Q17oAdefN@*ll`QDrMBG7gJ8ZjZ{9_-WtC7vE*`xLYcp!z!Tt65IDop4>i_@AgOh z*`9cBdAQvwYoLDHpJ)4|ZZ%JNxc4f@vQAEtW{8Pkh&=A=$~0zBDve@@0@F$e%!DMh zp9|rm)QG&vjH5BTeKViY68sfKhjM=2?d1ME`6)XM8*nY~uG9XbQ>Ma?9xKHFZey&} zvB##IABlYuzT0%{@hMDaa4%{n#NCWd_;BFYh?P~HaiZpn66i;nL14xu_z`W>F-pp^ z(?r5I>-yRK9u2YJ9t2m^iiDl{NN@HGn9zV*9z*cb2SNF#vrtE#uAwVJH*9V-k-B0o zl7OSAn29mgcJ`N$vgiQJ3 z0JqeJ(^A617&a1G%XUa-YK-Yj;sa$_kWh#*9LJZRT z7K8L=NZ>9Ch%+d7CX%VIph@=5A#VSB{fO^gG^?1=97DE-+*o`4KITh#(w(J(i@p z=`4b-jFuCLkc0;5d{htg14BVGltJa8M;0;gGGYaZb<%hA8kUU>jcB6fU5svp#^_wq zH{Z&S$w&-*nXqMT02_NFdGn^$TpTP*FrDpu>i1|3FBEaXdr>Qcf^ z+299ummVpMW`}Cmjl?S-z$R6ROeY2^_25Wcsf{x`hKI;&Fh=fGZX@08x6VNxv=|gT z6~CS6?Q|umciN1sItTeG#{dCM#M2wWF1-)U>4KIH$%p!&1`sXfVIRY*Qlx(Aqpk2I zZHC;)q(cYdxAhjX(hT}erOUM5rmlT5T`{F-{bK~$R89pIwgjO3evIixqIC;QljOsY z1C=Syg={wkEJ|QsLN99xXtq2J<4O{mGYTC+=$q(qs+|mG#zBWv5?TTIRUKtxG=iJ4 zXnh$WekefIO_rmP*#BIrMnWi4Vh9gr$a!E$?JGG^jm6QTq!C+xNJJlo5YEldozak5qKqfXD0wgD94?CHS!iVxKwTgU+hAi!7bophB434EloPZY*Ld)+E}7o$x^>1Esj+Lx#tsfG-^+MAg@FB?TC7E$>_4?-Wc*A!*)}xhE4?ik_!BJ2SB9aF`-lt!G5;nGs-5rzMmK52Cqw z=3Y+Vl~{GDzD`Aa%GD9zAg3ldF69dZZo>$r4tKtX5@M&1kljHb=N{^4=Nj1CV}N0( zSUs+&dimi`PTs?Y1Cz;oyFlpT@+R8JdNh`qKMpo5IP^+s$hHVtDjMZX3)g4YjUvWZziah++`A{arLw;G2rcrkA8)e4LAc*NzL~ zislg zD6W(m%@p;K2-?@a{h{6K zajt(h=EimMV}!_6P5Bw+mIcsr(npxlJ1= zk!Na@d%2rHz2WymJ=R+VW?qu}cm($=|Alb^A7x+rjC%MslEAMXk0Y)nQU4eb%R{x9 zx@oG0Lt?#*L~%W0b7UkzXVnxLHJm`?o)^I_Qu)w2_2YUtzdMp(v^ohIqW%QYXa0%`y7KjYkowf51-%N?^Lr8NxZZgO!|X5gA|EN(;% zV#7Fs)-f=pWh`2Rq{z*{jLGD@&3PTHdvn$`NTC$Dg+!YP2GyWIzc^~1Ovkur{;!>W z7(cw!BAME9%yn_qs6aQ=R=v0)_ih)>AN1w+i$(J=_y*C!>0oe5;0DZTywp4lCV(l8 z=5OK4&W=TkXRxb_tF8{v$l)a!!4(zDMe{J2h54jBo;CPGU}ao! 
zJq#Y=qWSy!hW-YFRW7b-mT)Dsm^Wt+gO|Bz{zH6a4a?vIL@PvxQ~M;8L*03)c^LfI zMe`r+D``lsYz@*zVn`mDa4n?UOVYz&Zx_v9?i+xag%{0JgT+K^hqZ#LPWT*6tdCC{ zv|btuBWDY~R(;L0$~@M?=lvz8Arr>syanX&Beb zgMtFrJovBi%|VI0saSB4+arQ;tM3(5sTU*4gTWmY!F|B@d|ZiQaZME*F_#HeL4W$s zAKN6Nwj;xtR@)i0ZHv}-yQpfl8U@8mq!_b?UW&KgO0*{$qf%F>tfmd1Wzg_04v}Y} zmi=G2)9u)7VTxn(b`FMs)QL^lS)YUN9K0L;LH3uxK{|dw8N9av)Lbnr=$r`#E@66D z3kxnvM(9!#3Ko1!<`(f)9^RERAP?K9r!oKz77|qAmYH2 zu*%?Jc$}|)x<4X@r8dVd0tY8K%vf6PDuYs{p+&ZQoximpq3&$~MQGNBy@)JYen;4g zR6-AIfL8Fr1~!8H%Q=`xs)zYDt4ycuPl|zgjQ^IC#?8lo-$?vG#xF&->WMNgvJAdy z;FiS%zRIrf(nyeG6}t&r&LAuZky2a6WBl^KOuOp_&Apje*_gKaQ=Mw4Bh5`zgw4o4 z@G)W~wFIsAnzG+pCNzn|w-b0WmJ9xyAI#Bck5l`@ z&xMZ7|3@1rIuj#@cY*NcpZaEBH^v!07$wfHNQYI(;P{90_*wq=qM#cS9G(SxPyU2q zZQPjD;ol;M{K{v*f=QKqByO@4`KM(^V)`ZRL}kgtTA^w3TT3zu!Sgjgf;{C&2hoh< z%Vw`b#`4?AUM-?gb0InTd&!3(j(h=+SjFhR`TNOfD9z3AA}aYLl;cToE1T~NUw)}v ziGI*%rUr_m%0~;?ZV~{pC+a@`ESY{JNr`qBN8)DKxQvm2h;}O@fO#%JtDSaFMgZ5i z0H!d@3+gM;?(+!B%`qq%&!7Y`CzC&7jUI68Pc^F96LCC$0 z2yd}L8q4tzivE;cllW+PB-<`iehpdGEVg=s*t&N6qCHIgX0Tbs2zeU1eSVQ#26^ zmHk+zr;!$!<5A^7!7zfI=*3Y8c#pkx04^f|^BwbF$1V)CdH!e8sB_^v>_K+n&n0p2 z7KHDxojv~_bKe0SRken@_c=3@2_d8r2n0w-fFvY=Kxm=J&_Y*=3ZlT!dlEtdL4yS8 z2pR;@YsKCb+ZF4@hInn*yBARqJ9h7tEARVunam9Fy|><5>#c{&v-jWs{qMfdnH|92 zo6;n@qo=n6eB98}+|iG+GJlZ&&Ln@Po3@e_@uS>=QauG7>{^|u@+UbCdrONN#9`rQ z`4W^_)CC+Dev$d;xfazD3cCL)SA?PP*Kb7k-{cH5W6M>=68tXjM$3OuTO5;u$wht* zd)@z#9f3y_)o38b6q3TfsoDLcY>E*fL}@dY6b>rgn>ZGm@-HaVpX0Py5~9a7(Ag*q zeN%|uw1Lk05N&QI={Krw79yFx+2$W1vBVHfnH6I`AxU-H6z3yIM|dULogg_Eov3>Y z=T}rGdz)nuV=@^EFIz@?XB0wfDJfsENUD*K!5@AV^7JO)>EL{as)*!y3sf;?K6&=B zZgQL#!z^e$Y-wW5R`SGVfv2;xADECHl2aa}p0i~t?R0kzCx+2U(&0nfB#`fa6wkbY zXgxhxnN%=E^q?#Je+n2rAOrEG{v*t`L^fY#^VPM6-hX4F%o#i#M!oz6aL$3Ic;@># z2j!iOdD?tL>?5B)ru`t`0dy{#Q^TVGk(<57^V9EE&d^_Hi(yU6t76>e{*~?)UioP z+`sm8Z=mjo?}StER0^u2g@y+TL-eG(hv`!?f0y%}(Srl{VN|X94hTb!lhk4;1otEG zT{hx}s$|8_bBjXAEf%SNa+)=kl;&tkU9NEWm5n|Pe^QBMEhO=BjJjQ}cJ6I&mlKA# zk;FSto-Wrr1=&Izpo}q?bw`MJt0O`DoMqNT-4L_(ljCPhH(l;>?nSqa5I+_oKHw|^ z@l^39w8l3DT^@06!QdX|cgm}viL3&0@D#tx<4!&}!VDOW)5+0}W&ejWJl0xK#WNfg zZU@wtoD__z{FyI)-pwc5Cz(tAdjaNg zrIzo1mRXI}&$ozRyDatdm)ZVq>71SY-P-UMs{WoC3hJ3jsMN1;y|Rew<&F^g4#|@I z;!H0-E-zpsLklV99g-!58uAzO4#|=(w2qa1G4GHp>8hc$Vwe>rMVd3SxZDS*o2KO! z&q4u8x@)MYn0H8)6l=~t#k@nZq=%*jilEXPpCgR`E$x*-XU2sK!clPG4GHpIZcE0 zviNSgI#7dG%3|IjSrX9TZduGbBufTq@OD{TLRCZ9FL;M!G4GHp8LGjDWN|K;hiP!H zEan}OC8ZjCLKZ*CvJKbZK3R;3N0f}v;B&H=cSx3u)ZhVGEa=844ZbXk`%&{~4IY%m zyhE~Nj0WG5#i?W-tHJkV@%MD{bPawah2I~qT7|zE+rzo9p&_&$gvR1eI`2x1Fa8u# zIaG8AfFu>Ig-j+NR46lL|6#&qP+CQ~nr+(VV975eyl!uu}B* zisx!tYc$f2l6V3=+23yo`hg`jh&Pw5;3QT!%8!QcO_Cs3>i-bWJo_{c{@(G!08Dkd&qWa? 
zc}MG)!Qlfd+kzTc%c&?du#y!6>*f%W1FJ%&#DOHTZY9MGtf!;$^?S%9C6@IpiDqEE zGS8m28sZ@m&A|E@LfwWkTDQn@Mp@#mPHPNq1ITO)s~J8kgB)gH?T+y^LVQq&_<-|g zqvEs3VFp&de-HE942tW?VFp$&f*xkTaNI@?Gq65^7NMO~#WNf)lEVzFb>KKv$JgXA z1MB6j8_^LOSU-%%JgZ`)1Bkk%!BVw`&-b?kDNiH#Y|$U^t0$qnxl{?(i4mD31?2ffo90)P>9f!?)aF+NkAlo^vox8I3?t|9>#d-7j1V70sz@PDU}t3c z$yb<)nbW7w5S?d{g55rK3`6!F`0vh|KD|PCA66e22d4m=9w{n z?FV@c$3FB_R~13u4T9`Ub4>{9?oV*!@g9y~+E^iul2>UV1T9n$JPmnXB4V_tdpWcg zl@-CW!N9vk!bE4xKt0X{p7(jUDa?sH&f`Jee87u5-1$M2$#))0Hvh)XHk-n4T}AL# zaPfFbtJFaXz5wLAAiMMTG{u*b#|h8h9M9~_$cj8JXY$zff?opW%NZ@Nn%wLeXr@h0 z$EpG$_MX%f738oslCKsF`TiE*8_*FC-e{A;2WlN)3FZ_nNl4DWfIFih>Z+SO>JHAt zgI}#g84jKg{2Ptnw*$|QXS0lfce+GR6C`N5ON%XxwyBX@5o282*vstEpu z2k#BidNd`-%UO6`P&l&G&x6=tF-W}VMK}8Ry@XYK;`jDJR^(AuJ%SUF<6A)c&kn5mS zX7&xhA$)ggQr!HwkRuXqo?cWi9z*G1bk7%x3^i!d66~NBF9U%rO93olb02T0XIot_ zmX!cq-W8AAz=Se$VG|5Oh+ikN*7)2CMo&n^zTV*plVO@Jf@yL*kjhfM0(R<;~yQ9TI^Y{Yc{ zJUh#_B3rfwkL8On((D1!x4i6PWXtOKsKx_AmqDl-ihc-y=gP8)$d(P~qaPkC7{g4= zpxz5**+s-I=VKuraOgqf&>O~)cdSEKYKLmR00TSmdxmh-q~5b+Sx?~0^YC~aWh-xs z%qPf*s?i7v`<&(d)hfNN>|UVDZpNdk5KJ7TEz`GF@ylgbvsgrn#?E7RoBRxsZ^>?Na_>OMl2uZN7x ztzj}cuVrral2O0=(Z*V$Vey5LXqY4Ct7ddujXA0bsVw>>n$L0aYn$i4fb6GGS7Zt^ z!KBU6pvu1nA;`=x$n0Wn(eS*njfz6dw=Zfp~8~0~~3Ljxj0p+zRvsSK%SYmDRJ2irP(QVY8e9jP2yFnQq(> zek^?(p><=fX;HU z@~H?z#lTP$?XS+@Nak;#yUOfFQ-%c(4?HAnm>#SM-2OMlZISaK^Km*{TZ|6G`j=Z z@UM0w%<*-%EHY+lO!{J!xl9MJ^tBh00?Qs#pHfjGC)E`Gr2 z&m=Vezs&`*Cz&0A#^KN8ws}@XIT&)$uW(PK?_r?T8{Ap=3+%$*nLFM!@&ky>Kzi7u zUzzl0q+ANl=0neBdE8J3yG_P4-p4i}HI2{Zok@7LPeT}4u&ezOc z)f5J1Ol@7&)TRwp3_DK25!MVRb)vSEDm!vpgl8=G*c zqGmocviA2I{?|+zS^Gs`j;Q@}Pk3MXUPUC_BqYU1psvu6e>Jwxp@iiq#lmu~sm` z$eYSYb+>c1$Gf%1sQ~G5%c(j*877VN_+YfhUE9&)>I;oB>_=HwGmWxbqoiA-%8I}o z;TbPOMe#o~%61y5BPK;$A2*6EX?L_@ReX!Jf)PfZRz{YvvMk;9J+#?2=wu9oCH>H- z0gJVQ5eAxMuoNPXXK-pL$@iZHt$DnzGgyw?6lC;3K?pk%UjLczKc7q+I1$C;w3mOr zLNgJPb#gRjyCxU$Rr4D823Xg~d+_9t_Bc{IrOj9)_Y=fmwno0H7?{WIVU7G)fhv1U zW!K2#CjGsE+H-UXWPSEaueSzdUl>4fBbrEs>^$2%^AkKn<(TQw6Q zDB-?o+Q-QAPRwS*!xV(C4Nic87rTauhLbx^!$gNopeCzreFIsQksPq5_RS-a=e4Gq zTNS2(8^=8$^BPZgu3AV;@e?suEh47)mm(iFnFqfD%rD5oCy|~3^pSNjByEuY>Iq;& z3^y$s+7sOEEJWw_p*%hntI1aeU zYqESZ@*hF(oE^w4=X$fw$8J*g0ZbEJzpos#AG&G0r^TlNOfqN*GWhnedizys5aNtf+4vrwKP zlvsw1Ww!ubcD0(J4ugxM)*hB{W6gem9H}hxFe4N>U>^c=#OTx!Ed@Tg7!-IUG1W*_ znACZwY%|belRN_)7JI#+R=&9t_3+eoR86Zab3|~lq19Ur|7bWO=np0sTM1P>O}Npp z+-6yyZD5^F*6RJ>WEm6L0iUh86ClfI1s*hlk!2hQ=7=(0q|JTuV1<}InKA%)DV*~Vh6V1z6DQN9qH<+!;R(&6V?xhexi`#WEl zxZ4k1P@OjmUjSl666}9}FAE^MMP3ENQ<3AK+vHkVJ`ef3v%$1Q{vD*}P^dkcRCXBo zvRCl{?K99;fi)2W62!{3BVV==kL5SwQ9RPKP|w#m|{{;Rli}&kES)c zL90Gvw(g(%I0Wu(=$`=$PUxRy?4PP{R#l;#>?P+LTdS>2+e@AV1D&#ji!Foc%ycT6 z)|gXOpFk12+fJk4Z&s1*ZXP(zcJU8Gc+E0|cQ?IjVlg(>bb|offo1&A2!z^!-ZgPR z4qwgv!WzTB>Xvh`W1l8OW}J%BbY#mW;;~{m9^-)Yp6f5$LTnu#>*OvxPs32SPTquE zRY$~dk0LyY=bfEc)&GP`T+ecl$CRKJu`B4~J|fO=^oCMgER)fj`B$GXJJkFeLes$W zv+@6gEytFaviZP^6+F8vY6@;(mLXp@pO2|{upGls4))G^a5#Hc(p=Ml0YytRX(%M{ z@qDn;$AeV3QTwQbQVes36$<%_rVkgM+AyMavt3x{{_}3rlMg3 zZt{dIJAk~dHXxb}SDS7X6VXiTx`<_D^iCQhMTWrrwc6;$=Yn39iO$Mp#1+QBJ4_0l zUjPjS{lUR?L=XySx;5M?^Wh;@bjGvZpnDJlh0DRt7Wa!$6c-36LX)lGBhfX6E8kH2 zS+0ojO3u@jV5xN>uA29ufR$oN!wo4^DQdj3SSuJ2}Ql%&Tr95~3>hJ!P_bzfl$dy~#FxVsfX^SZ_34)2POZXpQaj zScvLd=L^C1vKw@^mju9UFNbI$yuB19Mz)s}mhhcnh-^ArUc~9Uj~w2)#y!=LeA&UiTu4t!48M-r>6agJingtCxr=z zZ`86D3&FpNU^Z%f{6y%DS|4>8`Ua`CVtv(7uh}<^1P^~Qu5GKgvihX2*yT&C8KT83 zaBLipy;F0k5}8blI5$%6EESw0A1A~90tqwwR z{(F~V^_j-+gyz@(O`0q|Is#&=Tze5%Hno7nfI4W%9Hx8C`pG&ykdN)+EsiD20)YHe ze$Ie@XW~PCfNNz-(9_pDkg)$VeVhIa%0kuy=bhU)>}4m-{kex!uOcI{f( zD;@Awz4)#>f2};+7VtI=?_RN4W|w2m!|1VAdb@)%jvr3pP<2)H!ZN(iS%kF=h|V~* 
z(-e5K;&0io)7)apQ*Tnc(>w(qw$rTH1-)F^UT%bLXb?VSqxlwi*$4h^cwRKAGkyu@ z>S(mTPuSu@FN})dvNK-Ktv(c$UD#@Q+gi3Gl^RNekcjBE!%+|TS9K%Axm-VC#F~`` z6u~*WW+h16dRvlXNH!p+;6njXuZAqU)!67F|EMup5wVm2p6{Oi11{kT4)>^r$jc9bEwQ)IgELe>HjckS0QCU_Ew5WOu4C|3w zVF;I4hGk`c19~OL%V?VSJ6ZM^rA76Z5a&jF4-33iZL~#ILM2WnFmgR_bbV}fZYdiI zbXf@=SpRhf+f>LVreZ;2_Tw|VC{ig%+>cjiQpA3I8EC5@cF@ow_v1wv2}Aqwswcq9 z5Pmcw0KGySGimsqJet;69IMY?rpsB!fp(iYQ25#uU>|Lo0$iyJ5CeV|;J7A56rcxa zEI?69B^Gs{!1u(V0;mIp10%KbubVX7`Dj{W&Wq}oVUb;KA9Op{>yc^?aBQ2`jMCa~ zH)&+sJPwJ7p71KmC#u_3X^oj_4H_NbsPTTI>B&Ylj)O#m#sOWG#*s#29W~ynI>w%8 z)+3sAmtj4mSZmHVc8?l+KQ?MNu~n4aE=9`jXGU27bm!KZ7mTt$jFOnq0kSEV8@g;o zv@-q$CxoS_T8Cw~R@NLKBav}sh?!DwYim9nbwkUoZPcC(SX%cRbVvY*-es_MbO zzWab79JCBI4ShG7*3fsW?uR(1xWq95I*-5Hpi^h29?hy+EH{;G2*D6T&3C3`R;Iv^ zLaj%w$Su|iM$~)PZZujw8AiB}6s-~eIc0v7r|bmaIn9wJeq`&0 z2g0xt8d#3E4gHWwp^XQ z&95p;16oyOG|hX`QB}E;Uv66BR_k0Zw|pJ&w-&Q_qH47PL#ah9L(0pVZ9KBLEjaH`zl*d7nI_vb1n+-ZmS$ao9 zr!qCRC{q?|r6WvD>!CvNu(7nkXxMKp-KZ^XGnOpN%fkWnUO&0nnlBC8I3IC@F4KSw*z6 z&gc*Dtj7hqn7g&IG=MB-f1_kXqsk6OD_f?NRsGE<8*ikxnH2F`Y7|@2)M&-3HCe0` zjHqMYbOMjRHbz8S6yGclUNhSF8m$MSwX51Gf<%NyZ*Qe>K&_5JFic~6;24P4ji!$q z)mR^`@pPrJX1bC7(MbInExkp402Ur_?SqQY0oOIg6VWd6fGYqv)HtvheFA(ujI1;G z&?y&>J;V~88SS4CVBy63Cbefqkp30YJc`^0X_{_0x!xk>@!R!;j;56;rsk3&BTkc|UtZ{LknE2bH&HO$^XkG1sJF-rfvpdMh$d z_4aYl=xvdqDQ~Mj0+u1T&M12-!3sJF)@L}1R0qDiw1Td*HQ5|+gqy0&GoazZN zwh2q0W5hybRc+W}tdG}RIY$yIve5IX_}10KuWv}3+5(W* zLR+!``7HDnd~y2vOUd~^buD#hP~y%L|4=w{iM#`WiFGqCn#B-`)G3ccKHncyv_}*z z*U&DJ&nvhn1iz`^J|Xy11qTegP5$pXe~D}bd$DfNNM0(d5H9tNw3K-dpz!|(f8F>J zIW~?;18a5B>gBFMsJLP7;2!o4o%d`fnP=hB=ooRXdG zdAMyzcDrm}fC)SN-ucz1TuwFq612(i%c;aEw^P~c>rbLM{B8{wSm`USe#s3Xw3e+H z!7pq{j{leUBc+{sqfrmuRK%~`)xA!59rEXToT!D}WYh_CT+gEM|36&Yk~ayxvR?iR zKIaPgCz3sMQOe8Y>^ZYUt}DkETAWiaZ*B!XpG?3uS@U@=lk?})h}>8?5{**h{Pl7u zx<_s^c{JJL66$A+H6H z+gJV%2aN8~P%3o%4!oO796P$qJh%Wmkl z8s$f2RHHIh%Fj`e8kd+i2991PXH{2<+)L!qB>ZsO>g3(-x!dLMVAeQir`(3>=~uiP zeYG5cz$hoPVdhi@4seVBRr& zi96+U2wJ~Bw_(;XxXhh$Cz@Hml1ni38C+ne?138W*Y`bCuEu$qj&F%EuU|iYdaqH= z_!~2U>7R?5G)z0?c98oGxD6#Sx|)vq4YBOs%Ga(JUdXC)jk*(4XaBaoE8vU4{BK+@ zGwMV1r2ewmye@Y5w3xR~!T1BwJO|*U9*DKn%7oY9V7k+8O7%eGG>`ZU z?b#8lP$a*+0(Dzh2CMiNn5NUoXi&0js1VPf4xRsw#DM({+NE$49EilY`$+e@bx52T zw>glW7+3$@dRPD5de4HgD*Rbbx2FdJUWb3WKBj&FR5+=y9kZQTqs{C0`7bBxnc~_eI+%>@9W{B87pw@kXOt?KO*5d*?aGQb>n=GHP z7%DX_P57U3aMCsoKXvqWi9`DozIdr=A}$|tzGQi+RulIn-0-CsFfr~52>O~EFfFbP znNtmz8P@?G_*xh+H|}NV@U=8xQCu#{>Ps_VpSYEzwlZKKZaiY+OE=(%xRV6i7;s$N zowVE5fRp3=^f^Puz`TqB_Bfp3>_0|}2X@E#dgN_eECSvPyHjN3OD@Mz_|o4rrQ zWzgTTGEdomPNG>n2ajXW!9~*vc=+x|lf-C<_@((CiRB-P_wChz60z+Vibr*v{IUCc z0(x>Lgm{~#&(Gg2`BVdoXYttRIFaL@&;KH5@g?Ed6yX-B_UtwR4Sx}PE~wLnqV+oi z6E8(75)a@h6HAA|fbgFL`D|qSIKyJcqW+wb+OmjU4_;@KhCH#8NgJ&pe{2docE;$| zm>63`Rs7r-9;d~wgL>y2P0NgZlF)g>VFiEuIQ9=hLCskZJCx7@4Hd=SM`)pjdWfWl zJ?IQT$?ej1`H|BDumP?W{$zeYA?wM+tE^LI19%*OJhOQoypfktPcwAH=7*?b2SHBz zsa53_82AZpQy~8CpuC*UJ`1qLE~p4*;%)KbIb}i~AY1AJ{P`$mnqs6EX}d$zM=W(O zGlNM*jZl@(&!D&Rd-2kj&U`L87|7QA21I^J?$*lv!*sv(Y&v+D9O)>~EuB|dKMbbhQVP$2Ca^!Bp*V4kd zlFC0Pd>Rd$D{q6|Hc;F4CBo+^eLSRT2iMM%^bNl;&rt4ZqO;wf`7?y%G{x&~-?_T| z=UyC*f%Q-(@C#(t3FNbW;^P<}AM^1B9wNIxy6KHU=*Pcfa$&=U^?P3)@jE<&UuK4WS5op z=peA8ZKSx2Nx@)H4^n4y1%Cm!!xQ9Ab!j7K7UR=G+i(-hnv zBp9sA0$LgTRE?NzOSQqnsuD&G2UQvTOu^$vSc9Ldz-og>R6Z7yoi=z>!A}!b2ES17 z%271v^AATsV(vk<&2i-97c5RMk7C(l+D=n5tJaN1ovSuf>|y!#5>{=f*uysTAz{^q ziaiY9pM+H#>fvF$+K&OO+E5SAZJ-Y&tlCfyrEeZ!-E4X)7_BJtefrZ|fpVN^GaO^0 zoJibw?PH?x&CdK=s8jhkP6<7uY&Z}Y=b`Ws!pg^S%18fLIO6k<1u2F#oI8niG=qIJ z)(x6{Z61d*IfQf>>+V@M3mJTz$DvS8U?hO8yTD-A6P{$S>nxT-qnv86n+&!y4>!D* za8Q@BGtV}>mm1(kSoGFw01l!{i3j2_R?C!B{P{yu=pvJKVz6 
zn33Fw8Oe>9k=%$G$&Hwi+=v;;J&y}EJ0rOfGm;xIBe|g&$-f+Rsg`G+F7aHRc|*&y zcrMTIhw0dgu*!?)^2{4rp2c%{<_#^+;<-HYhL&gXT%LJD%d_~Gp~D+mp2c%{<_#^+ z;<-HYhL&gXT%LJD%d_~C1VhWS_&aGgv^t^3B zJd5Y@%sW=TqwHgO){JX1@BMzpLaoWXdQIlttD~dWWZp-0aP*qY8@47}1v#IeEj58< z;slep0S{u?OcIWUW24t(NtLK4dF?PfMyb)*cJ^_m)8}L4N>(G?@R`s#i=Y~ehf{I` z!A#he!#Nb+K`@=*h#F9NLsW7tb&t3W)b9aubu@AOq3ascv-Y9fHcKZglG30O?eB zI@RV3V3KNReIKn4Qr0Vs)r(C!5M0{&X|&EOYP9v!Xq}h6XzK%Moi`zA>j7FnL{M1| z(7KB)mPleBm2cBvXJxS77_tL!Cva(lximP9pek-I4Xz=miknM=HxpFF&7;8=2r7el zH24)j(fm&s^jyLgC1rC?2%bwdAik;L9Pnr}&H-^aiLw7yKdzlHM?)U`$u-4TED{qIX~xCGgcz4- z$S=&s*!zOw?*aZ5n!kpa5dTUIf!}OA^akAj0hrA8n-`IbSpPS8`hOt$MF!tz_vjJ3 za%U0_dQ%FNx7?Da)GL3uB~RI|vUR!0?o`=zKfr4^Z|XNp$W+jV5(@ z0{NuPs2epaBsZ1GoEDPfrE)%7By(CwPL`#>Vp>RMlS@u9U}7?-h2-W2OiSjpkeq73 z%w$dr$t?_+oBT4YC$}_UQ8K56Y~=YzmDx#r-Lbt&op?8xoNSLsNrka1A)nYh-N$QJ znzzx>6TV*&p5|?LD@H$>+EJPgg8j>;*gnJxUm4tw74(RDZ%0G7kjHm%fDj{vmgc)Q z3`!I&INK%PpTa6uvt9BdKAu1%@70wqd^~|jepJ`F-^UY(y z)Q^aze{l!Hu_?mo2lGjj8MT(jL7IYb%>Se(&Ht2#plJ0v3gmyxXJ{lCq~2viy)rDo(0o zY0G%hD!Zzr>Rgp9uTja0ttwf0vr1~8R>`VQRB~ZFhUN5TbzN1mW`asC;?E<~o2^}? zl6Bivvi>2JY%}%BxkfY`aRT_Nrw02P#={LM1gFk|}L)I$eVW_W}8Z`eOM*eeWa4>PpV{hr&biWVTej@JXa+*U8s_quTjY@kE-Oh z4^?u0lINdP$^K7N^1`nw zIgr+taxeB($v@9i$x9ciFGx^k+ex~A_uE9PV&!9 zv`KF&*C1_sE?8yTXJh$%4WpK9w-EVrF|2rs?#kGY9=tl^QuG0lvAz?iBJ-ahUkpjv zek|Iy-#wZ2W_vZ9%H&OGV6JPXMUnj=m6FNDdac8REI}+CAX!3((|`g^ zblO;eOs5+#;K@$>J0-H+BFqt2VFvPa?49vZ955MkL3J{2KrMPQGD_e@#-IV14>I0{ z#>|X5XxJw*YWm|tIGN!Yg2c(_nuo-b!9Pt?=g*jg(V#9dgI}K2WoG0Ng2V>0LgWh= z+aog$z}7!S##M;HIw#|61gfsiBih{zceY(lcNS&5t=w4*4ku$@4&7;uviUQ5_NF`k z#Q>I>aZ4{F_&Nmp?xf(3jM702={5vRW<1@4;>)useldnxW#xA;=fFKr$GsV`oPINk z6>~H6J13(C6+pI}>2Tmq#6jkEf_d37#mVE}M(NnvdA%#HdSlG-I{JK6V8?W)(_kSg zGZiYkl%7+jV+%)i$U*p}=qx)D(b6J9m6G0OJSXGyHWSZM$(emra^86=nZx7L^fvQ% zs${`kNOB&6`u;SP|0+V^>3&6g^4)WVK*|XolP{XyOoUxJtKn4VE#xmaN}mQ{Qg&1P z_hZPLO!pJcL90{`QRpH%nxT(BjCd5K;gp~Ijn@!VAjHrcfhz0MiqP^oY^QTn|u z-*Wk5iZ~O>>k;>+i{LGcP=$JkB2JHTNdUBR#@)18#XwjTapII^nP51;<{ZW#3OSp~ z)6npn3e^u}0`-d`PN;GYLeX>xR~S;Sh)GLON((%ygWYAs@hqkg&w)iek^^B18_s=$a7kYPWTXsZ1BUSguD)O88NSvykf)Suh5e za~vHoK6Tw?bM+$Ek^N^W6Ho?}=U(E1nZS(*tFQ#3;Vr{b$YqYKCd=5c%26!3)f93C zB$LpZ5++KhH`N!;!Nrk$vL84On`Wb9-JST)G5sg3G!fGyFVNcQSbrbnRoDvo`Gj1E zVJe}*#7i+Vq@o8`Q-_MNncLly>nk|}H!e22yBGt-3(6xtMbF(Nv5_=;U}qPu#;8auQm)Ve!d+i1;c*Ch1-95<)BB^0SEROnCw!NqUNsoev9}f zJszeefoM3!1#uylwemuWHaFc^wF+w21t{W*S5Cr&l=N>^J{==nQCtSg6Id@L&2n^? z4RsjZ)wCa6CQAncb&`-#I<%2OuArp{qeIe`a8EgEn3%W1R3X2hseR!bM58PjNOXa? 
z`j(Yg6(n8iP~3)^%W-)Q3;I}ijC#pWG3d77hmb-Ra+NMulc`NuG(1=7- zrpS%-=T5`dz*5M~i@ct87By-|YbxaCM)vIrw)?_$G$=~pzUNC6CYaXK$G_Z`)`t%B z9N<4#K&6#v$nT3oA=3tGDAAXQ0;LVpP@3;r#5t{0Lz%ug)G}N{xxQbiWrT)`d@$x?gnV0hR*aoj`F9SrJ=KY-4W5W^E7m}@3KNbQ#CZn_XxEFH8k0G4WXGDI>+~Z zH$byBG{yG_bQz#d7K4P&eOR@=y`*b$5^@ z*3XLao`pf9YKiF6dZVv%I~*}?V#8up?8K6<^=1uue22l?dW(kqz8BFQTVJf9roLSC z#nxLjl<2!0vu^8c8fxy#NB3%diH6dAsW8_1QVq57?dk$Z9a!RT>-pZmDxvibjs|#@ z&wLYdiNl+X`+@{OvXoRi52}g6=E}PTYf3g>w zb#16aCnyh`g@{<{be^%fJmB-h6mn-TA0)byM(R68o+}bVL911jig@D2$hEsGLKHb7+$1e*%PBz>T4QTcpte^F{!1V ztxitO5=2FkG=zY$F61eO?2ADpwTI!0T&E@|=E9%JmI>+9E@o&W21@>eL~G&+h}?r3 zO`Q?0i6^Ccw$++=LL!GUXz!jHQJ$N~I}nA`kHd(fkOwL98}z8ul_ny}OH~>ZG#;|Z z577HlZ!|2bG7ObxFLDP8ocgm-%+_G4iYGGiKdg;4(eXATI}dB*G}g$Tzu@3Gj{KS) zUfQUKMiI|>@vZ}&$7V^kSZfA??6TY7cRjKakMLg1yzpzk0HK-6pJmQjTFjS^q z7G+g$EsA*fCC6cWPW@Yy%Z7o+V)8XM*DZ}|DEyTSSJL@;;o&4YtUbs28KNQ{tH}*) zUq>3;)X^*KN!r*xpyfJxbXK7y8Xbtn3waJF&m>E4M{frjSc-T$C(kAEc(kWVMVt(eIG_C|rDvBY;B4~1Pm5BMhJLIFueY=o+!#|+;L@h*5gGMv`a9N|v zS}=-CUC^;(gsj8x%33s=mx2n?HloGEc;ri1)le*a0#i;F>>}4$EP!9$j#BG2iW>96 z>^wm({umm6{HZ?Wuec7AO9dlQ9)sa9U34sgi%e1QC%HD!=jM`!k;KY`m6@cq##8=) z+Kp)~-$XOirPiMIBduvq(Ar5JXzGYjP7C*wbIRw6m@GLTYdR&^Umn3~FQ%jX3ALqF zDKlb#dOtouy@hulL^l_%qEwgTvLBV*sXi2O35br){&zPYM|YW0o_fb z(S}3`2Pb(Gs;J$(J32*4D=!Ag8JG~-E&2@s4cELgS4mFv3|a=7~t;%F_rbR zN%%*@l=I^NA=+Fk`vy!ZTF+y z$hM;}Jjr2u=v7Nta+75e<_8ASl^yz1QVe@DL~SA0K#QRcCsl9chy6g7XV4_v)^au4 z_E^vw6eMJWdPO==nQ|%W=T;1l4b(zXKSGalJ4iojEP`4(oYci=zizH1At85YCJ zN%5ot)m1LY({*gM>c(;uu@R+LlCQom_eh69r{#e zG3+z)9qt6;aM`|C_tFrbw&}x+Ah*%jIQb||haJ2t-$ZF1Stejqa?g?7SvRM$j3LVs zvdol^VFhAYl+S9UDJ>__Xo zxmrW2L-8R>-wUy~9<>PNO~CPdaV^1o;Vh>30^%HPmKl%|t$ILe&$ zko==b1EUx&`6pf6i)CN*&WWlGWe*|q)fn$w$8YOCaGphGJ!Ork7j=Q^5aI8Zz;`kt zaAk<_TsT2D6#9k%FU0a%<1D(X20jb%ePH=i6jTYd8xpNUIi2-s1dFD22vLt&s*+`u zY8%-D$#NA-{V`rUsxX={pagKNpV^zrJ#1OHEla6rl-LAD+P(k8V@WLJ7 zERL4k79y59%i^OX?OAwy!geb zLV&}Yu5R4H5XYa+5KLL&97hHI6XbsA!TkNjE6cWuQ3IdiRtF0`KDq3iOz!m&t zJD7$To$WS7P1%N{%GP%^sa&CE{dNZGAl!;B+Fh2xt#XUe*`hhZi}r@)&5Dt~MR^of zw4W`TTklS2bihf6L&__6w|fod#c-oxMH@o4f6hSBo^;!A#xXO9n$8;d3KoX=K62xt zN~zd%QLhfM{p9vTEuCVOY~kg7!3tz8mh%!D5eO^vcb3;p_q^BSFL<%u$4X2WskS=f+z{gZ(X%zG!7FbX9(s6qB_!bSz8IxkwM6yM<>env*X3>D z9gh|p?nnb~nCE=&uLbhGExP^B1Vq8PA>M$uJ4W)xl`HqP=UYPpv%OO>LNqSWu&gV1 zG9*y$eLeNB1eE(anBRp2R(e0cDy?w~p^}S&jIOr4>%66Z8O(;o=&1+ag2WZh08GgAsS$~WN*HwqC3weI)-5-;!Rl60KsQJRQZK>^-X<51 z1hV!yH=Gv*d47?Z`!6L_s1r> zu{B0l_c!k=&7z2-NLGwm%`ap1VKl+lhMHC1~}-J`UXa63p09} zWy|`>8Ho8KTw8QG=2%{LrjH-bG|DRqwpz9)sWJWD6NJWxmbwy6^k$Li79!2VV`bedBv3F_JCJ!C%NWB^eI>$9H>F zal;(sMp?0C4RFW)FB}|UdEJ&VS78+U3kST-&R@5|U1aNvfyC_@vjEP9YYTJm z8OxS+raR-W+LejmmzLL^5OY0Rceu(>w9)HvB&jTCEpP{8dj;vMgH5h}!IoKbyHkMfwJRaiR z=01t2{55a54rGeEHDt?xZ|l+g}n7=yDDDeP1;P zw2L2~^8STBQ{ggorMXvS&4CIYXiHFI>mls)yP9NoJFCwUYOGxd661m5}?i1k#?yI>Ti;t|SPFu9RQ~YD2!H-iIQr4~c z=~t|O84!N9hJpOh?j6M5pEGn!3=laB27&e|nDIldF{?3zKc?+qV9Lf}ZJ~J*$NYk^C#U>XSl5`$u`?Dg z7CB2ZLzsbWe@C$7&^D-=oObGqK%UX&ok5@4O9yB=&(EVjKpV^HAbYiHK=JOcoX+wY zG{=>OYkDeBCGw*7Lh$ny2xjg?G+%SE-Wq zy`8jtlp^QY7A(nhuTwN#!oFPk%U&Sm{4g3SPhluHLWYmiz|dSSL8GWabCO#43^{>y z*III2s9YSEOtUJw15h72z6q>WFwbcgaqYR+*(PjiPp%F6sf z^7C4JapmUmOUj%d-Dvr&Xjm*A#qGq`u$S|P>bRRj^tc8(`>&z@BSdf7K7@jiU~56cDg%<6T|2v>F}Yk5{UI5$J4(Ytuv05Nd;$%IJ(0B&jv#hQa|wn{B4*$ z9oblwjrH@_AkM!rQMMh-$<3LH=(IgB1Z=VXkz|fXw=f^Jx#Yu-Mfmd;xdv9-s$X=< z1oin+A+Vx|zj=}0rhxXf{=`ku`12T1bQfvglXi><^MbwT8Rpkg-J{fD&=Xr=Lsbv& zWcQu%LwMa>_rJwIgw+P|QV#hTjkNpC@ySrfCM9Km`E+lf?uhS%lO(>?kZR}N@PO|` zPy8&P525_o=Q~3O1IVsbUod0nak9r!`+fvIdn11JLsluW=Y^14EHdFskVL^cQktVF zWnbY8!ir{YCg2V~E3twbNW2`QZuZsAz3uIC!VsS#@eY(H`+BD!TZogCF$N0`g^0I0 
z62x@Eny4FM!GDSQ8PiSnUCzDewh`hP)TM|IILkmhReTURco3ich;s`nKFsd~?;wGa z%_0X6$+I7K^1%^iz;LW5M?04NAI@+rd9)Q(Ji~DtIkqqW&pKa0>!~_kBu6fFyx>eg zy`7??zufJB`jV4^QI#>b(M9_t^8o+%K&s!5;QK#jHC8|4VATJ+2S0l5hg|tu+|XE< z<~ySlp9-~N=SP(fmP8k(%MN6bZ1;)COnMHF6@Lo+d?tDFyCd+4E?*2&zzI~7ku+SY z(t&(to(xtIiI_!4vpKBlpX zJ3T*LsmX6cHR%Oegqk0Kir*pOmj|X)l`j&We)J`ks~t~&E{9V$L$hZ9gO}Qa*@3S@ zBy|WZc>*6W`v5?=4aqO!0if{GneJ{&(basJG;Kz=LFa zc;f5yyNWTQvXotbiy)sUzwog=$YZ8G z_$CtiQbQi!#}xTWLw;WY?8>h-)KoZBAio(8Rq@-I#qqND4nc zv6$Kk`5l>-LI@Oo^fGl6^7|qw(`KQ!eF}wAOkDsnzbMfv3CS?BpWmEJ-307ykt!01 z6aHst;T~l9p-PyF>C>mr5E-*b5q|F3rXE1{b9m|or%$gC?!)|fjF|c{u*V`r#nkVC z^+EKuqA0JEf7NKxNn@=3;CU~mO@C*>ew2vP#=lUrXR@n13~yFz!B{Y zFZSZ6V=fq$349iIRM{y9nn$T&@trXTR@MX0Pguh6bHvQhmM%^VtPL*8%k3AMO`|lF$(#B&RRNtiXB^e$Fui{O^ zXQ?Rk@e?siMWBz`idCu7_f@4g4vhc{-z;mhs&Z6&$J^thFg!<0oePd;XhxcYket2% zL#Y}*{TxcC)&be2qb-$Ef@{^w!Fe0m-}fv=!h&R)<}v4*03ac{PQV`_|A2A2Eatbh z8nA$e!Z1ZLrc*QyFL91d`l+n(*n3}iA|C#D-hu~-Vlf)lUu)F=EGs;!R zJMLjQ-R*VDYX*}3gQpkZm(K=&JHkII*xi2YjPm>F-!Gp2ihtjNno*?x>Y*CXBy8({ zA^e+y`vWfTLHqyljM4n#YFd&0hlf49iu^MOpHT1{fU9b#|D@+U@RuKEUV6EpceigE zw{qo5Ts`V~SqEJvQl=-~GY48EX{!&Aok-Y3LH1PUMjz>YI zq4F?5jy|oH1?afwXwuN=vx=inJ(%r?b0J3@cMh3rc~>Tt?NiFsc(ythIBGm@WYid@ zv}AUQ(lT9ZS#PuiLH9hodVx_;Z`d0Y?2A_LxK^;=C=fYH!JS4y5x~fD1Pz8Grz%Iw zxvCs7V|6)pYXx5$1qV-2AabLMuuLl$VH5;FM-d7DsxjTLHxz+Ct&WJ~Yf8cDl~w^p zN%^}^g;E8rj>D!I?3K%D^XPKr)yBlv)d9ldH^z{CPQXLdUIH%m!{b!&W^_NSlW&5)2%O`L^m$gxn-#J2 zGz#r{{7dtIU)q+B6g&{w|3C-(Vzpu3Xws<8-2V*iN>P3zuBgs@l@YwVQ3axQ6*Q=> zs5Gpe0g&OW2N)U7eFhT>=ezUiY3;K{fhbTea5=I1Hlw8oxJWHQg9&LVUZAvi#+k;i zw5&d66dXK7fhde>ikE5y!;FFe=$W>+Y9weokPV8ZzU>KD^Y z8d;oiU?wKy%{NL}ot$U1Eiz(zOzL@6cbw6nGeUJnLBsH{Ct79CMYLbL0TOKYhv?rU z(e1wS9+25&zcPkSSbrnupYM&9Kv5%FRFh4(AV7Pfy-6e6yr}JlB?|qh_)85vjAZFw zXt)p>80pMxCggQqAE-J{&-wV1)GEj0=PpnD!!-5H?j0EWGxdUr=FSgjRo zGzt!B1&fRVeqt9{gn+?>if}+HIA9difesJu21pO~8TJMbjvGu!LDCXcgfAe$iVa{x z?s?*r2G;CM-N3%74Uk>@Y0}8{QCkT%YT4bG7S+I#R%r$KCXG-~5rEH@Bi(ZJaxWYO z{1oM=sC~%TIQW+~PBW6@tcHkQAZoE)R83IyWLYbZKp}(mv0?wxibXWRA}~iZ!FnCI z;YL|dDbxKsA2@2PH!=<3-)AtP@PDJVY=Q)Pg~0CA^Tg`KMnMt4$OaHJn2>@IWvq|d z7mb45TERU=!NF4$;J4P1<=CzjG&x&)5C9z>d}kC?81{xD?2cCOfl^Q>CmRjZF4R?g zlS!eiKS&J7p;NRa_K9*QyIhs$7V8e^o&%MejRLt^>y9^RWN<2?m95swzBbBsD`mQl zUNagG8kvRyijpXo4k#^kGI^3-z4S1~119ynxU^vh4oDt9QAwI_{6{^>8zGnJni1rzFY zWT%-lGO|Trj%fYPQuoqstXWq@mt+M=R;P zOuO7+igvmBq5u(M%|!tw!m~Nd<@3J<*CD7_BQqsvm+-yE-=Lv(HV6z{=4n9JHjwoqf)(zGb$@r7_mv~ z0`yJHHEBppl_U{FVnX5QT21CU`KQtOi4hyI9-R2+c}96$`HuC<(7%dSuTC5-)=Ed1 zsxhV(oTno))0lYJn$q2~auDbYfTb@r^iWBZDT}qz5vJZ!rfMHImI5e?>137H8|{aU z)*qs^t2k9aB0^))az>+0cABc4|Bn$nYXdm(&#M2^sJC>Vp+`Dzu~s_5)K+DxcA2rX z8wN0&O#;ZMY&Hrnk5;VwJ_zOrWyg%)pBQa78nL4$^&HUk`*)+>((jE{uYzu|RyxAe z&=m|sZT>VJAu$Mkzgn39kbdvp2-ooMZj*)rqWmrbbA+ zS!UE*`qRNE87DEq}M47T!D;;5Kat%X( zxyzWcOv5&tQW;Z2L1)k`{f9!T~abfv(*I(ezlcDWHdVp5M@ z#8l2R>Mi|-X!WW#EY?a#_+4vE{bEerba6oN-!~@pJW}~eP#40I?lYuNA(R!1wSo~= z-cwd8gEMq=EYGm50ooF7f4}a5eZatYS%yCvHD$3O@$kXC$NAJ)C-)m$yN!)6Oo{?s zXRKJ#-PR4eaZv@bSSuJ&pn7AaV5TmSWy;(Zpn2>iEBObDSQbnAsZlEyYXu{$ysxZO zUSW(_p7q8^C~W1%lx65@86pZ~vHyd)_W+No+WN-#o+OhKW+qdTkV!Jh3?V>*bcPNl zK!AvV6h#zK1Q7u#7g5A30!mSusMzqLh`qg56!GfCa_t4{wfBDY>TA8l@~z)m`<(0< zhUa_#-~V~O=i_-e>#V(gd+mDm*=JAIT3&X$+iL%aYOmD4nzG(EDmra$f&dMA)s*6j zzRFpOo9brFXwW=U%1JY&+g#A8UMVsUnn;embLG8_9Ljim#UtdVy16roy2ljtyeVpp zQSqu*)Z*(+DX!>(oTa#_ZpMsKe&tdY4>&>E<6`R0mtONmC`te{$OU|yvnV&!%WjX* z$TKkxXP#k-+B=j6<>eK}8pYod>-7$=Dyd;vuaomd!Cs?aI?3rMn9qK$HCncqJgr+; z#81d*@#~^R&yp6a;arVRjDjA-W>#amN%3}D#ml)J!;FF*qM#$O=%9W3DNxN>H8@ff zoMRL;k(@eWwo$Oiz+0QJBU`}^Rbc%-s+&8b z-szRpIn+D5O;PhMmLhI9l#c4Qm{MHPQX|Uj9XHj@m{H0WE@g51Ino{%<6I(zxzl{{ 
ze@sy>;GvvFxv5@ud!_q|i(0AAGiAA;!%SJ!pyNq|ZgWMYZZ$Gq5N@iQF{42(s}%iP zslPF$xS(@eOL@YS;)0hhA)=(*BuF6Iqale_cjMF2F&1zhR^G8*Kj zdfDx6dsB+~t0~IG^uIJ~gtnWaT)^i!i*i%F>~>M@&qm*!Sw2r%wHJ@dpXJs4AEVvP z*X$PWHiVy-Z?m75lUd_Rz1*lAYqDKoD2@3MBtjRtqNB1k^P}RXx*0P@W0xuABU8#x zM#aF(q>reS2TUoh$Y(^*)4KI=Q{9XirTiwn)8}OAozy7mon6v96_kcHEH_%u%ht}l zGoEr}XiT5O{e8C4xRNz4KG7(<#3WPde9xq$0jK*mgPQ@!l=b8?RqHD!VHj*IED zP1@rX)q?=sDpSN)Ig4^rz3g^Tku~VM#gCezE;dEHon6!wrYIM1tqaI#m7D5iw~IQN zi#l6lz=vERO<83q4gRmD1Xpy35oJC>ZmOFxV~C#Q5>x$@tSh|m4uZW7SSGLAVr%xMTWV z4xvB7^5HL~A?t7u{TZBofU<(S0qb1ngQ=gjUahCo^wl5f|Auh2{)l9HxUcOWnENwi zV|;^zdmh}8zUjjK1KbSX8sWMk-#p*V!VRQ{QJ>~}LAcMrt@8aK+@Ha%_f^nAG?n)R zv|Q{v7~C4&AKV`K+f!n9fqgI^ourAKi=2D&(I;S6p~y!K`2Rz9bbS7I%2?#=q84X% z&02jA^?6a=MiUA*fKIuZid(0jr{0RupHaR?DQop33|n~KElTxfRC%c_7mC7Y4Ou(8I1KjGI@d?#lU?L3Ipz|7O!V45tk$B7Uvz#h@K7 z2&3$Qs7~F8KUX&qXssS6&~s*Tm#)>*n7`5EPiFoOkN+d{dkue{UP2R|{}@qp|0_D$ZG zy{)tRh*r529!*5|*Mu+lj1LHW0lzRbMS^S4u#GRr+a|NKr? z8s|MBe^<4acZ1%~LE?)412b>e&EoGJm1`O-Bp97=-HIi1av>C)q}Q!mOF9 z0~ySAb<=$7^|cj*^OFl{H!XjoK55qJszZ@Rur|r-tk=yn_d68pPh)OcWW7FR;QsY5Cegsyf4 z?>Tz68(d_wevBI4v;PfcURr9i-bz!Y=YY*L8x5{$vrf|}^c?s)4UnYe2gj7tvek3Y zZ&hxRZ_G|wT6+$zr_nH=&H4fo_Z)HqwZ!NO4*!`}+Flj@n!aQ!!0j4-b1|h<`!A)s zn3RX9GygZXmc!qor=gcFp54t|0o{~W_e*h_vN2CmC1ov@T+6~QQXMl>zNAMhGi98& z+Pb;YWiCeTouHIYy-AbH(WlcqDcD1$(!WK263TIpZy#^PJudqHAM-xEWeyeLLmc6- z9rNC|l2+#MRu&?haQHIZ7C3(A7NRCnF$eq&b$gIf3fKF{B9$uldT~%YG}lzI7YE)! zz~ADSz78d|cbqQ@h%GT}#6iH{;!?xL9R&O>E;DS(LBQYQwuY^Do+TZ{<%aFyAmDFt z+_3$fWsp`Fw#k`5yVv3>!?rjG_*CRUuw;ji7_q9LC zLBQW)FAltefWO6F9C!x-e~Z01@J=3T)fxxhLBQYQ);RDEO*~cHT|Z7OMZjNY6ted= zaoEq*&MNS&ao`;U{4E}#uc2&t?Jv_#Pw*oRze76+_**>6@Hc7)0e_2I41b4qMuKmR z1MlF#y0|qCyn}$h#bfo`T%X5Gz~9ip7}ui7!|5_pkEQ!IUsF4}9oJJi(XoMjeBVpu zN5=htrA?od}=`A>eOx+zh()wO1ho{Ebe!jU)*8Tl_2CU@C>z(j`x| z=}3ilJC4F{N2)>y031os9*i?00C1%GYR>3Tg!V3kj3BGuL8j=Lbm3UQ1uBGCzy%7G zMUjXF>~i^`4Nc&#VVM)!ZgCM6n@GvUcSaFt2OwFyi;`B{@yu z(Bo9Y;sx7TIXqi z%Ri(VBEoY%MR?ACC?Bjuc+TIGU-<$(FbL~hpsM_R!BqMCy#`V^&s-uIFL4TexbQk)n+cHOjVl&3psJ&W+sbnWAX=B7gn`d{0@^PML}?xi>ul^UqUbGsy6%jGWTLP=3Y9U$t!0udG$>uuU*3AZ@Zbi{u@Z}1ax*)a2B4U zs^IK#&=Ndp29r69tHw zf0oG^A2a#mFHFuXqJ33WaCti>EBY`wdm@u_<}g{khRNE?nXJ2u$@({#Y{0gxD!B1i zCU;lHz}+*1$*x&U?me5yeK&!W419*kpnBTTRFw>F1}Ph!M|*{;vgS5SMjXkBqoT~U z)G#?@Ad}HYGC6b!lQEYt8GApIar>AY_8CZdONh1`RpndaOfF0^xu_qLt?-CcReteO zCYNkwa_OB+E_zRpnRIGWk;vCflblxpD=Qt9CNk@f4G*KV`D>HzwD# zqwQ=}`L&Hqt~-Xw4QDaAX$O;Ao?vp@7ff#V)3-}i`JL^V+%IDb}*B_ zPG|D?Dke`{&g97lm^}3klc#@Y@=QD0=~k6LJCMoqlbF1+kja}Hn7n@jlTRNdk|=MZ zKVcfszH0D6T}L!WSmz0}N=zL_i%f0Ku+DdqctZ`XNnTjz(^9;`YR80imQ_`?-$uyR zu+BdcN|!-a_A1@BhIM8^9o9G6EM}zz1yBuG6gfJtqj)c;%b;J?V_oFvyw31z zI1j(5AS+{~4eJ1a8{WX_KB*E?XkFh#q;tdd5FiFD3? 
zL5S$Qfm8F4qRWsXgT@HIa}On$=)4VhVQ_TbzbzN@%eLtPIICsOXjk zysbDoZ(yu7I1G{AZNQ6+qw@wP>Hd^OhR;Ue9mmmm10R}cBYpio(h;3Eu*~!rKR`&9 zO8cEp={|yPYjEhL#?g5Li%fqp7&6rrFU9w13)W^wH@qe}I&T|}&Wk;pF2hxI8wU}h z^S0sWym;A)NaQ)9^ETi$%h7oQ4SCu42E27SI&WZymU5(Bcnx!O-oSDz-R=>*w%He7 z;2gb`tAU(0;%6Xcr>JkHX}*_<#6x4E^EOn`8=9l@24-r$w=pIWEqq?U_Zric|&{bKI+bKd1DRS zoekJ#aa`U|!s-px9{Vni%NwG&yk@<{CXD0qhCVh5txm(%jN|f#wy|c(Q;)40$K?(E zsWn+-?XiF3xV#~X%PZZ|9-BFi%NwG&yqv~wDAj=N9mnMjrLES;>x-*_(V!p4RuoZ( z-NJodk4+%QAP8n-z!0wde@`i4=b+jsq zV&lqjc}x3*5SO>KKPrrGJH+KJ9Uz<&f_t~pM&Tl%NC)CZ2p10_E^p~b;Zh;Q}c}rV_>k)$Mm(oLo>mNc~-qO*+HH8qDxAaiqT0)zK5;x`+uFLok;_{Y`6>dTZ zad}G*6K-N?2e=8sO$s3{Z|NlA4i6zNZ|M=j9T7rY-qK@)n;b%1-qLBp9T`Gg-qPv9 zO$i|`Z|Mx-jtU_zZ|Mob9UVek-qI6=n;Jq~-qQKP9TP%a-qHoaO;h#w9N@UTrOVMS zvpe9WpgYIqEnPVQ5Wjynr z(VM942H}Ds#N}>BKiu{Q?MA(U(Ts2SkVj5`wAU0O1OLZJ)-2wcj(wHQa4oqtrql3&%prNBI}HSaG9$%zGl%4j{$Lbi zHkfh5*EEOZjjqb>Z?mZ5Lz_eLM(;QP0^i^qk~eyO&Kep;_(tcDywL}c0ZjsS>TYo(GO&9p(xSWfa3-Z$s6r&e?$Q% zMf0OxkK+gq$s7H?RfVd@@dStDjb4~ls(eq>Sr z9FjLW*e9QPjF3^rF&BsAjb3lQH9esj<&i)jByaRK^R1bM=!samDI7M{eBNsT(1`I> zBO!UqAS?^hMRnT**{naP8V<=@MroQQMvUmm2+3Q+A$haKy z>31PRNZuL=$tyjEe3H2jhvco1ki49+FNfr|TRBwMK|p~bW@0osZMaD*n?({ea5)4B$y-Swc^Pa@ zGkaeN2_o)Ra!6k8oVrC&$CF0&{)-3Tki0xz2EaYg`x5{VlDCpW@`?s7qYUej{6^NR z5GoOpx6*{<6$H=>Q@z^*fq03P9FmtOjTEGMjeraxc`HpwUMn}(Vi1Sqtu!HdTcucr zJ5#=X&OA~P!L6Tfd%i$?7xZgI&srqYmw_I570Oxo5FJ||uj_!IPTlRL%`|CpIy{&| z@>WquUIx0y!+z&9%Fxewi>?veMldBJy{IS-$(v}0G50%DDWT_^blio*Y^S3C5bH0_ zkgWB=Uk;{Sa99ILwaW;}+c=4qYda(_I+DDVzlWTv@ib2=A$g4ye{E+zyc@RyVuj>o zFE*ZnyCAhf@*ZAntKSEy6_PhY+Vmx)C({nJH6$+=tQuo9&+L%A7-P56GFdRaRMQ~f zwhSbp7m}A7CK#So3~ZbUpcRsLe}w?6#0uUj2tG$rCGNwcAki0+9 z{L0hqKLywe$t$>lzq^Um6p~j8c{N4V92l=N=xPedD}rB0j~Y106HrLrRzd3|pasu^ zP)Ocf5P4JwuJv#fl2>rt3*17f#ggA9mBpYWsvo{}7OE0qOySME_+8E|GhISY zqaBhrBWDj!C?R<>a*l=23dt*P3R(Hmb0Ngu!-V9`5UzvJ3dw5=hu;h#Lh@F-A$haL zSq*>Eg?J%(TOlg_38Yp?-VE(|bX>$Cd8^%!yjIrX2}m@AH zAw(nq6OtEiLeszUPE9ZO5E7CX`#^W%Vg9ONR|3%*l6OB9!~Ws|BqZ;C8qyyC05^js zB(FU&`9V(S(Qg2PkdVAKLMC?y4Dk~bS+tsCiW z9#%r~+E{O4Q0YIrFcXqD8>Z49xrk~B$!jCL1wswibdZ=Cl2=Bb+t#}lQtVKxIV7*G zLz*_u1(=Y$*$BI7r@Gi`3CU|?t)^Y-!c0isY?w+vBBvQgyhXe$POaC5-=+yZ_e_{ zq|5L1uo9B@z*xKAKXY?cOGw__xvb7F>Ec#NLh>FsgGx7hSP99S8*2^2A}@o4u9a!-J42J z_cBOG-rO0u`nag{1zrXT$(uWa*Gnq>h?hY^^5)Lax*2W!0T~dIw^~B-A_Hx&TiNzB@4f9m0^kx@UEg^Yr*e%pzqiq*p zR!CkOW)=E@3#*opyf(}$)TnfNxGE(iZ%(XAcXwgc5|Y=3SrwcNm=%)OhFKLn+l5tE z3FglprB_?Of<2mA=(ONJw5A(OL(__7zPMlJ~$kX}*e}_x#Q$q(6cl@;h?vP6tH& z9JxZ+kJEcL(z@F~A_~bHl90T}k6-LJhiEEXESwXPki1`#LWJb)3ID#(g^;`sslY*A zCzx4Qb4Xq}OX~zP%W4kE+XV}`?u3xM)f|!+3bYGtC4;+{Z8VBBm+6{*4;CUnIb_j+di123Z8Xu%A!QqF{ZPQb@MR>DzLtn)$!ke`l z_5p5(jiTG(U*i_x&Du5p3%3Yw)^0=r9r_1989}Kdqqw~l8b%>PF-@lFWLfBUP_JqZ zZ{~L%#r5-%h${b^QgJFM7;t)gpC;lz(Z3U;BG_+6@FJLS z%m4~EHjV&m3LY+KwSDZho#6D(x73+fIDy3LSp3@>iZOZ}wML&?fT@4cTBRoKpy59} z4$$U;U8Dmp*^f9BpeBEl(X+Z$-{hOANWFV3UAFtjj3LkJM)pZ7=ExZkzMD@GPN)Pm zMIVYP-dXs`XfcBN)B8yM0?gZ#k#({$yQ$v#<-#+mKIn%YM%KR-Gx>sn1StaU?ZOR*(XqIZ zjcUalRfAr7yAZ`tuT6doSvDZ;^TNxirsEBCik0OMwBYkX{@X^%@-wpRMwYJ&)6@ae z4fG@{i-QJz?Pi&xHPuFcS9l^3l)T75m$qh^jBu~|9^ig1>`K{J8riy5%u(ND7d&)kz|~}8J|JMg@xW^3r2LX

[binary patch data omitted]
z8E%lM%V2LG8x7M8H^5@l`>^sb&2Xa(E+dRI!%?LfzKW+Yx-6IUT%uPK#xu64(hSR~ zk%Q^GNC*IlC|8={FoNSzttmDWgHo)OE6woVW5an6GC)Z|X@4On})9%Km3FwZe;#TabZg=RP^G{d^}x{qK}v4A?#3`d1#IIJ76jylo| zM;*;DkJhXiu!VA-VMZOzu#-Pd`Bibq9nG-FwGJ!Zr;;?oQK1>;YtQCs#ELBDg*3xa zp&1V814 zXoh*JYONVTn&DER8CHoML7L%Gp&3?*H)1GWzNJDltdh(T{9vS1Xogi18A0m2QlS}E zN$v>J43`Scuu2j~kY>14XogkdXoiQ8X1KK2GNU}B6`J8vr5UD-zw9H;aH-M^`ywu$ zOO;Uy9NkNt)rJ{{kXJxTT;Oeubt?QLBS} z8EA$@7JCC4Q`9w3fM!?~GOCLX4HTdmu8*N-pB(0m~p~JOcrT|_v+8tq2gs?Ni$q3G{X`#??c8g72i`wn&DER8P-uJ zIxS&E9&vqPrc`K#8v&Ag6Ywq@L}`W}W#84vBZX$TRB47Ki=)O8O`73Sp&8a=aHP8RW+onqk$CLUWo?ds6iiO$*JisNC2bb#M?(Xol;PHPy#aCBrtL z84jtdmQYQa;Zmg;=4ivNPavjxGqt1{E)8ggHS$arNk&5T6U2~axHO;{4q-+CXsSOW zhBU*a0nKm-!=7fUTd>;s5lCr3Gprb@V|{36w9pKf1~kK}r%J02eUpKbNI$rVM+TsG zvN363jOog(@c$S9#t=YxWn>Dy7sFd3-jWD^>?btCoe!CSM1^L!^D}D!D~W6%xcMHq z8s<*|GMhP?;W&SpEi}WO38!o}1WM8jcUPL>VT|JvOzssZ zm1cM{ExClHBPNFsu^VUY-IZoIE&B98sWiilqpv1{2l3sNW;m4Z*kMw#TZrHxd3U85 z4kh3aFA`DAvMbH7rXt02i0_CX&2V?68E$MNkEQNP8%i_W$i@(%8Sbt$!>n_grxl3) zNpNN400H}>vH_GtuBZ1FIBma!^JFC`E-GGu&mN1y?z!KTDiB$m&1}+u zHG}LaH$r~Ui1&T-B9Gnv7;iPY#vJgb)A2XHi$cO2c=Kt`=t9{>((g`D&a0fq|T zJ_7V3o_*H%79~dlTv}LOL5zoWx z8pM`d_#g3P9ha_UbFen?{WDA?=*7lNTnb7ezlr}?D(NkW(A)oYIL9`Fscs`%QOxaE z(K!$XU-b}y8s8GgQsDSzAodPiQ?!HrD=;?rWsfLajUQskFwt$A4<`)-)18b6m5~qN zCg|;72>FGst(#TDnBUg@pg_urK!&y!S?pk%Dmst~zpX{#xAjcUBoV2M zya+czZ%O17$t9s23W>;C+h-KevsphLE*?u@u;wfzQL^~Bg_7t1PDEK~R ze=1)>g4t8-DQ9Wx;9b-~wAG`jEnE-n44aF_^))dzi>X>f_0vZ~J=>0j4N3!u46&-q z2zd^3K=xug^y~(Z5?ZO=PDlo5(-*5mWV3pnb?L zK1Dw$gVvQUzXow*n2@s{wK-E7#_bbmpR}(mZ5a0mYTF^P>}TzR$a#9YCkEOV>}-q+ ze#lmz?m5&RgC%G7KW+4w^irFun*;4m`%lz4tD!#bv(yeiL9^enJ4ZH*`)Q!vWv^b- zFmC2yh`Rv;Ap0Y`8Z#!_0KfB+damk3 zl3U{qLF1$;zoJq;NzC-NPt&gpQBL_Nv8wsuzB1yqU+p)I%WWBz@>yaFZ$HdNj1t5g z6ex4O+tK5+QC-ZrR6ackk`ks>Lpquu`U0DN%9`ZJ9AE)1qwF&b0w zlLYgFtL&#U*Sp>lSZ3SlaI5}^xbDbw_LrI6r-xFPfztXHs!PT}-7Od&tz#p6VGm!Z z{6VdI2bx}K8hsbF9v-Oo3)E#84W{)pYC18UfxgB~UCFbl;yx+6UB-mwu{8lIUmCh7zH}HTgJ|{B9?^#+#nfD)#51-CwN9&>nyBYfoq?>=<=aLnZXRPxiD?5W0LF$nBPNbCg)Mf z?{>21c)w%bN{hKJP|o-6z?e&mc{7##mL+?E_x2Kh0p}{=87lcvPj;<$&wT$q=ae5) z$#-D(Qt!Q4>B^c6zT0?>mz~PEWF`DMjmOvuCRs}D^P{0XFXISIFJ7b`A^k8pl!$9* z194==_jm`hhx;}ZF(oM=?J2=Lnki`U>{|8$NCj6bsA&kJJ^FkdPSKJpg)Jx z3t9UxfQ`&-ea_!mt06J9d{~fuWoEZf>1oN-bPTk&X1;;fkzX!m$0P>^+U=PaVx@&H z7$BtNHIoCxZ<+6667b_oJ~fGz0VI+&6)zhfA|;k|YHkW3`B@bw)kFGXo{rHmHIEb0 zsVf@cF7HnCWM{=BKc<5BrP&>`dZ42+Q9lW7oo2|kBKFOCeWG7c(sNvm??uFeonfOo zYwQ@moMFVFL?q7x;;5`?C}-nTW(SCAS$AN1Wy#z)bg7(6MA!AOu_)^RbhySg?hFuX zvi8S2qp^)wi5Q1DAbWGxQgrl4qu%#<)p{Y)_hUvT452 z<`HMjxYGjJbxtPJtWKumE(x^wI#XiAnjxO4*hU2>%;s;cK%3^#SQ=m_0h}+Jf0T{4 zpiY+sl4iyC)V+de%|B~-N&{U>)VnPDnt%1tRDaFq;_XEpr`hJae?>g4eAV?FOdTiE z=D+^|^I=``c}l$TM-39os~9z51>4X4~j zB`3V*1@_V9ey4QG*Qw-$*BtyBbJCSHyQ$=a*Sx#^8Pl_0b*x5{Y>S1de2>+9fUU)l zZzCNQh>@CYgmBb1-(;hhSRKS9-==~iw)u_rtW$mKQaNt4{Xqohh34CB zKKwN3r=Z2`h&6>G9iwlahRw6cls2;d z=T9Q6Z)Ec1RYrb*o1k}KWIX-=2vSOTQ_F7Iij)?xVvt_hs5kIDRYQ@o=Wau2p@wqH zdLWj!h;hBn1&7;1UV&%k*N7l>S2Pn7n9W61dW&t|9zfZcR5v4-qNRcMG^aI17YDM_ z{TMO#!fUOcZ_2_q1n`jAd;5UT(rQhU zIM{xR0poo<>{{Sl3rXXk1F{-RBFt4~qywrgL2rq8D@9xq>4^?=bPdK>89XDrl%A+~ z_gE_AL}eE1nu&?285>ardznaP+cR)D@v(PaVO0U)7*!tQ)JwHutT0cU~ zU`E{LYh2H+dY7sx4?}gXJ^f8%j)PU~y~Mbw`Yq6&Z#O({45?M2)%;YoKMrv}-3`&j z)MrzM^ih}DSmAF8|G1NrrK5!zCQ zj9+GC5r;dM6VeY8oQK=;2SOHz)helekeEJKL;14Z^B*CMn%G74708?SlWn;@q!ts> zu+^EY=3{BUxp(iaO=6mRQ#|H|Po~HxP4me*;*+((C(F$z3&;OlXVV$*Iy37=>73J(JhwAikn%C?aoj*L=`g&%z3SHaVh^l5ZM~B<``9OpF z8K;5B#5fu2+}HZ!0CQ)6=?)vFwV4P@!)W(4U*o@yI#I=SX0ySuuW&WMG@X5kTazS! 
zU*3CPdoy;{YlGBHauOlZ>$5XvpfhN%ub?TDTuLR^lwPZht6%m zDi6x&hul6IN;BD=N-kEtWX4yU8^o*%l*2O~z(kT3b1Icwta?XgJoAD7I(3zBCY5~E zdPisMMu(O6g)3*x7ApCx8gFXGxF6sTw=Z9vCiyraTqb)bXB_gezf5+V^6EhO&y1T<>!JLaEI`aRGyeNQ zgP89G%8xUC#6l$;v*uSS?_-)jWn6@A;8!d`rltU^N%_Hv*CMm}-wn0BsQry^i%yxB zVuZT5rly*TO*cc)H*@8E^@@tQRPa|G&Bk1Tp{DOQZ3X&eq?(#be3W-gW-OE^fz*$w zifzPkYtih)SFdbr%~ZS}0G7wUZVV7YW>fPAQQQc5mt|%>734O?ac&0WPlkk>i)Obx z{jWf2s`sV5JKhYTPHWuQ+SjVM#YkwbugohTZQOt+{lt^pEO60qz%j^$Y)lK{?Vn_!=N;vu?xSW`co1R*9)P zo3_5Z8uH~?4_wt0r1}OzrlB^QUGLrWb3+T&PgC9bQK%pCUPQl1PoVk}sxQU3@-}B} zxW17(i!R_vb*)uZiKH2kV}J9Fqalr$z!NBB0Ij8J8yH_Rl|OBZ`52>LIa zX3SM?layjY{@vn*P++mXB#>c~B(m5hXiU*XRQQ`DQTT7A`>D}Q(zU-B!xU`Z7P_&s z8MWe#0C3okcquIn0nQb`p9JI-;X-?4;!EFxCN2fjd*lS;=_aY!olHX!Oiw4qivC3C zCh2kXW}UbtCuYEL)afSaPjry9*pdr>le7*sAJ($!UchUoB(fYP67*Jzpi%_*t^m2m z%`%nr;?^17{>9V5dH^i9M-D~fMm62|UmY}!)WE`ek@7nrs8uQ!wbSRMW^gxET7Fld1G^XebD*Uz< zh2PfKP@`=<5GxR-VDmQ6jh(`%6;A|!!&+dfm>B{r7Qhz-Xj?D2!ndG_huXRY<7r#p z{yo!B1k-bE-IWM!>kbe5Z7n%51FEUhw*CsEGcC5{!f)$eu#5?7S&Q#S=+1?S1id&& z6qkaM$UewD7K&8TTO!{6IcR`4fnr-zEN%TLGvT*25vcLE0vXy`Wb#)COwn#C{I(WF z(ANA532bYHOV>cUmmorAq%F)Q=soCTCg10zfny>0QvADG#o=dG^dpu&AFRE>UyKlC z<>zwxn>iSqL{t1@3ivPr@he4e^wlRB^F4y$D`~2^C{Er&EAC4A74%4UI-AERxRQR3 zN_Qo__Xpb0n67RWZT08Wx-02%J7CXgVr*ek)nXRZ?n-*)Q|XaoGFq$lA;eutfBIGf z2)muBK8z4|B|ZL@Mv0t2wY!r36iYF8HLN+TS{-N~vOR9{ZxQ{{-9W9olD1bijQdoe zebWAg>BL&{dS2(&xxzR&9D zgVv=xj#_sm-S@kOaccwZF8k;w4dY%+t-F$r--}O?(A2nam3pqahe~%PJsdA5{=6el z{*y{~B|QskAI-75wz-YtHg|y7BgcGV!A8oOJBvZxVc;kOFs-#&M;@p*Vs~bb9%V=!9mg?Y2TE|E0ErF0%(wbTMV=Aqm3^crw zuGi>mt@ZnXnpe`Qz8kZGY5glT!IiXV+$@+ZJP|7Hd~qc`=1XI?aT*CnPWB_hT}dZ4 z<8x8YFs|z-M^ovpq%*PVV->sTCV5hz9OYg0v{tDfbNG5H-IesyI2;Mb9DZw{oZ#(3 zo3k)M;R}F?N_+= zAy&SF9Zr~}SCm1oN-+!bhV z&Ac8x)7R3NNxl|nw`cZx)2~)dLVg8T^HYHMEpx_?_3gca97fI<1NJM@k^>X zO$LZHSp!~d%EnY8+?8~D^wxEaY^(|pH)Z|j7Go|DM5rvmm2~Yr=|c=_ryCGCF&}eJ zNrY~t2r9+jf6hYuQ8^gvnKNE4qjSz81WT!m{7sKASG$?=>`8M?&Sf@_Xk1C>o$Sl5 zbu#&zRXUlDD-X1{`P!2a&r}Sjf)i%Wzq@^E%xN^HCSwYiP5>8gIiFAchrf1PA!$}D zr;gKQ&W{h?yx|W~$H_7$(_VUedc@&x1!{@3BWI9Z_f-Ss(^T?=FK3j!6CHg`sOqJY z4*!e@9MI*=w0B~;)ELlwfdiamU%JN_lCa^|xU}PjTR8zdzRFo+=b$ci%9ZMJQ{a&S z;u^ajR?s?B6d_ZKtPpDE62`eL=OH`fH6~LJG06>q_8<1$xBYS|LT9gg1H{{Q4~&&? ze3@!Z&8vj)<&pD+edecn2)m@I`HEW3N;$jjOfnt%dKA$? 
zjeg0)O>i3#oX>I&_MXE$(m0iu1H@t8^Vc+$$`3?vKFc}A8wW&V8*SL=f;iqAj02y> zIqgdX=d+wsydz%m=d(~7C65UZtGvrT_vf=vS=!j6unoa&mBeyOE=*vLpjIm*wTPZj zFWnLmR7Q9UIq>RQJ&POs8=96-O2PX zvQ^Yj(FU`ich=ooQNLpl%)5amO~nlA^k#P1$Nts5)2*ORZ)Sgc$(Xwt!zEV1FDu;5 z>_vFP@Nc~NiR#VlV+82UZ0z3+B7Yd@ZtL-5Q;{>7l-|r9id|SJWq0vfN`T(XZoSX9 zAlY_+Aq42nZ0(0lS(rwE-puZ~qbUok2+*6^XRk=Bwe*|WGkyY2t4036A3F!Dqa<=0 zEF|cy6v4pUCHkF6B>TNw-pgn2X=Mx4*%W=o#Qqo!*;h7?I2@DM#lGx2C)1Cgi#h~~ zAALnzL@^b8sbE8A|J3Si{mj_rbrNe8M^eXz&i?Jmqx_$-INcoT*wER3Z93WDw_XUk zTqM^Ax@>^u;sX$*q zlCxNE7Hf$7_BXqeeRoO|TGTUbRz?beP4M%)l1LYNRjMup1LxKm`Ft;ueTjS)k^QFZ zYf<}Cp|-~zk1SwyK^SWPv_N))lj)bs6`QD#O1`DjkuY0@VBP|WRB=CbQptCG4s-fr zBBy(mI;rG)HekYB!Wb^Girs;3`^0g^9KfLJw2musiIzHk{C5D-B9oFnE;mPq~Lmt^n{d=m!w1zK*4!DUCkVN8oE zRx_4xWykMCsG2F@XTC)bGvTwIg=mD$V?}UT8vzI%IZAB6|Uvptn*4l_D7U7^FP8uWvUJb=0>Tv(I*u zKl$soo79Ubx`0(6GPIq@WRajOy2ss0?FzIs-k(~Lv}N%(Wf{d6>Z;5PY`+ZYq~(L% zsy-in|M@dW6a9yg!u?0W(t(5i!}1Lr1RM{mduFd_rGJgs*s^P;qRv(x2t9sIZU$~V zT^A8e%*6AVj6V|-hwil|kYOeknV$I-T^lGc6RScRsbU8eIukGXUXR{v-e$2@@eXx5 z6Mu+TlU@Wl-OtqNOg!;=oRBew8aBxe42Uk;O4mYim-rK2tHB6JN-JoSzj^Ib;vI~|GZ)M@k63q` z=Ta*3GO#lYYh^=$lx_kxQ3GdS_L;gfsULJ4myJ97m$dE=Iz|e2b_vS_5Ont2f$vXN z{99WCi?2PqGmd|`bqo~fxq|~4F1JJ$I}RFCG=U2La!VBc<<>H4^m42E8zbLA*}NGv z7aPu~6;}m-!?J%s0jY9;BL(m@0eZPrhw9;1O@87NV8H!I@^hl}a_dn{%|3!?ce#~C z%X+z$@r1wokerwSU8vK`t@j^p5LG35SgOqsqnA0L=jwT@h_J1 zT1(;bcf^tIiHJ}c`5JD5-hP!UK*Y{Y8ac!kc0#~T{S@^&*h=S~G04slJxuEGkqiiM&Sp_NuwB(My z#f;X9_w9I_*omVcJOWDA+}Kg~WAWLUzYF&|ALEVPwhh5OxuweJyvllGKYA45GQI#Z zI&Zb!q}W*~!+V#)IpWKQ;Ci`+#&Kpo9u}?s`g~Hq05$v3{|d^cp;Ew_NfQFp(e0lU zl$VBjR!}Pf)Wpf`mtJm08tNxO-5j8%o^h|BGSg6{XCuV{&k(f}x$nDA>1FV`Osn$a zK)LUNP4!C2#Q<-mAG2S}kWvaVpktsMFd(GlG-?L)50ryiZ>%?~**>1i(;)6!buVLj zxmju178B!F5LJVQviq&NszGJP*fwVTH34Ms*;_-96It$}`>ADLz^Ch4X8iq!K-Si; zhgyP<|CWjs7znmlTFq(Q7#Yw4^=-evy3%$#1g}y*u6$)y(Ur<6i&36o<>PQh1cNq@ zc_6F67DVb>^$aEeS&X>KU&BOAIXfi9`(wNv4Sz|5{TElt=v^g|8Sp3Oz-i0o*u_3o zaRGdG+{NgbU4DSO<_f082EjJ^4nB^(1iG#>p_3dq-5~oB$38j8K8o(LU+Fe<7uK}RV+Jhq^anf%54ms4{KJ7v!QV!t&V!fV?`v#^Y$$euAMRJmZa?)9bk8*hQp^4;!|*OL-mj0r zYZCmc@=~~IpTlSGz?|0eIQbK{u~E3d?pAxeG($VR5Vw9Og9o3uQ<==J*nLvgnnQo#n2@=bt z?gfZ9l?q}At-yph}$#Kfn6Y*rEw%;C!h0p^&N>N;7`?44AkdQhwT^ z7>3d?{BkAr3Z(tZfI+n_G}+U50%200LCQZr{OHBXSk{H2=L5MV>5)x;2BzK zA^ck#MoL`5)%Su*A2{kdE@~8{`+>2_CbgzJ!=Z zUz!Dm9OygwnSX*OVX17KS@1W}3Sx*w;LjgZ5qbyOt-+x2H^Oo9ut)Czd{I~fJZ%w|$oLfDs;Az!Vk!#{RqQjbG3QkrdI z)R-O@a1}QF>TbSw>Qpq`8^HXCey=8_KbzFo5FV7?$4tsQ7aVYCOhq!pAu|E#B;Y?R zT{BghygL*JF~3M?2>d&Ogc!21L*bgl>yRd%;h>m_a3n~VA$-##7=Bon;CFSYH3*Zg zLE9dT4AMTSTOj7HOmoMOmCn(py?3Wr;PxxQ-B;=bgvp@bEc`Aund}Sg`v}bX z>`5F51E$IfsliW#AA0BiBndD-WnZ(K&$j&A3J_?VPROUBV9n`pvazDKMj4uR=QDzc* z!gA_!cn@sus~HNa;~ND0>iAJ&OhwO=?w8;vHK~>xkUm;mO-nyDsRJS8M>r~!!R=QE zRep6G48IXGnvWF1{NyxbD_j!`p)N@8JSMdPLT=?W4nsC}F4M-b*ux)dXG zMZ+2SH9+_yI)yNV5zQZ!rM`#%^&Y-PmOC8Km62r!L0fP>>L&+Usr1NJhT%QlM79>e zYZ8nuPU--J$#kwsh#|}clOZka0_pU^IR3!Z@1%sAz9ZlF!qyx}^)0gqSkl6%1?gob zwGKk^y`%}Rhmce)ZuosBGXv+im!UXmsE=mIPMOgQEMnYaOqtQ!`S8v_IQ13uBz19S zzOwob%(%c9GPXGJZYORJ@vihifE4>d%ENHYA4BeK@X7%Au@ZzX!4vP)IE2aA;>_X) zU1fM%!Nd|qTI`#U7t-`^Z^97$ks25KB6#_MqNXE#&ZKUJkY66A37>$F9}NbT!`Y4L zd<(?^=+ug4NVD4KgWd3>DHfSNAN&HZN$@Mx1do`+N#$RN38mhoUs2s5=68e*Drz9a zWTVn_7(!?Rsho>&JQ1`aLw?Lw z@Vl68;Kj_w2IU=*#@ri1elo3TGK9kNW9ZZ{_<7!>8it_ZNm6Pm{QS+KYUKT@Z_yP{ z@I*%yGTOs4#74#ob_@)`B7cc|!u|B`{W{k5$7jU2vKPms-hrH-eXBv4T~QcR?0BwK zWc6=Sy8${k=ohkfmR4ztEhN7s*2vQ3FtyDkmlb29!uJ7!Uwk&|(u1MkXMv%pfKP#x zcM&RO$R8`*zv3Hcv|WN{*{O{P^K4R)4EgPGCH$^Eu9ZkUrIbk89uGsvZ}BvyZUj=V zLdY+JL)pOs7(#w(oF;6E4Kt4tRVZ(8S7CBSI11S`sontb*X0V5jhzGWe9zVDY=HRl 
zM+Gs&x6WJ)dpY-le-8b$ID0y@(gVav@F${kE`aQk2nPKbR!vZlwwx$O7fRg*DS!AH zN(Or#2v_Dnh-^(Vkrx@3N{hVs8oc3Y`&WdQBAHMc^8WHxS*qMCro*MiBq#bPS<%ms zyqeU(P?4cd4aj=NRR-7Gt}-S7)V9h8g$DR64seq?9eOgUsSz2^epjOv^qu>FAQxIA z#0pEq?3A9{BWi1pQQr`kW0xs8se|9Ux?2YI}_2TcG zI1VxC)HD+^f0 z!z&>qJ(mNw@WJ;&-lIR{C$fu0@8uML`4der}jEVlG8ivz))~2qA z9o=LMKRHtjLo!(>_^C>&6hU5qs#1osNgV_szbaB8L)`T6bC=XC1bD-zDu(<4Jr{oI zn=MA20C*kW)fZnz2W27rt#s%A*@NluH@n+WoA097)iYkJ)K zzlVTH@XlYR^VH7>--l&^Li?j%T9ayj8GbB|45?7I=-RtyF`HA(oPz*3wt1+F$@(&! zzaxQ&)=!dt7n=bwztDG0>36ZE5cB5|nxLD%&WDgcsZgPN`QHE`f0U4h+yNm!vv1g& z-hiAR(KqZkvCECQhBL)1ZX6?b$lNrGJ4fIDuVmn#m-A2f@3m*4T}-M9QEOHNQS~@q zqdLDtRsW3n*?=R~ay*`_Pq0rYLFboP>emDha0xzusDJV8sCm_!2u;xWC93*o%s*8V z%(())$od2qh7xpsiKYH|f82@^E|Uc3<=hAV>EfT^n=`2~u-6=Q<#O?J$e#0yJ@wC9 zg-k>|k2N-L9sL>Nhdr$ilR69b+KqudIl~OSOs4T=;?_O{VJPt{s*(r~Z&Oqn6 zjUlWBco2Ki$MEyk;k!NqS>xtU_w-JUoXlfV>g`{{tvO{1K0UV+8L6ZO=eG&oZlwN# z`oO?2Lom$q+En{1aZcAjvtSg2yuC2{YH9=Ab1*E{Cs&7%CO4I)o?>#TIf%+n!L;OZ zk~D);xLJ_18AexP7vhZF?w-G=8C`yuHK#tB1kvwc9yjkoaAD#m>>*~HY-}~Zfm-wl zlHh^W2#9TU?f&q#J=~hbJAhvXnOBfe`x^YiA;ei63v-7+m+;7I7ZK;z6~kNC1`hwF zI&Q`r0~MvYW(f7e6Zup>_<3F*)|5nepr0BA6_4w~ss-C2qI%~bXpVa-x$xcNMMUxDChtgdIBkr&+4&aZ0&F^Xw_!Sk*d3?y6vN4w{C zL_T$AZ)j?HEAb7GzwF$_8Y|AgzJV-LoIEAT=SeE=6d$9semBrMbuD z*h?Zv)C;nXv}T2U*2Vt7J2TuNw?!aZ9XsDk;6j z)^&%!t`nVhaQJV*4rG!{lixYBrAf`~JXn-W$~*d>2(3hdIB5MclPi-BBhfAA!t5$Y z7n$tuMS8Y98Rm9k1KPrw$#qWv>fVRkEaswUGa_dHkXd&j{B>*KEap<`WlC4sx|1lK zK`9q#<`zh2+PZ#}?oDS;O8GDKK2p0D#Wym`NOgwZQL8O~m(2q9bh#Mqo+;$M_Bkjv z!LyhP-=0Xtd&Jfq$;uf*r$3yK0qqjMCpBsNdhOC9bR^wtmI7U7Jx@L1)goGQt*ko?7IpA&Ka|BfJaw-xP|R` zTcJA9)@`SNg&aY_R9p8k{B>{9c^MA>t+lTsDek$K5i`%$)xlpkpU!mdg4f!w0pJq& z4gy}utowx>W`%v(mAs29IY$t>?(0&DZ>1DnP)Urri!tAEF&DF3xwk>`dXwdvNL8b9 zoo#P~F&6H1m+pt|=@;}I%aS(IHc{%uEbe+H)3~^-lhn^h>ch_H0=kIkW(O|E5tl4@ zoL^qw-Q=~7JFYGGs9?_kz9j^Aet~Zh_`JzK;thX;9W|6GO-EGn;H1xc*z{emo4)bT z%N}a>Wcu{#WF5Tx)G8m?%h439c?5!giD0}4&UpfYH)a0Z0>N6l>3U0am*#fVN|sl*AN+ND(J6wnm`l=1s-CcQzrtVl4V@3* z$l7QzTWT5g+ih(JRLi$Wxo!`5>O45}%i!?eI{P6qvjf02wstGB^M%K<9&ijx()VWw zzLAX2fnYIr%ei==HT%k|eH{MtAgtR84+lg12JEwZmccG8_!=U`77w)!hMq?G`xyV!~(JB~E|4OYJE? 
z)!)2YVt)lp@I}W0e~xA4e&;Y*uB$iO^}nm!hB5xGavS0Pr^;;v4p(j+D_8y{%BErl z7>!X#LJ}R!GA3tnID&kHT@(9~A{(r|N8)xgP;IBIv z4w`EyqOmAPxfCY&O)%cpO@#a$%W^c-dY9X}HIUbFon5y84sD$bTiij-bT;PuwkF!T zMUdCcp)(l{%(4$Hb%67I;3O-3q!QVdK9Yh5wZN@W>N!{^n^CC3ag2oTL0V)|#f5?Z z7aV~XEI$ESw%+syc6Q?bq~N> zdk35&p+uHhwyT}7o7~fDo!PK#X2H*}^Wbq9Bd%QChiOh$!H(oWdPDBGV9EQFykbre zfh>7d@C*cN>_$w`-Yo==RHfM* zgH-CXc|JnR?G~(bz(B{Y{VfD|v0!5KUa@?!9B>>3vUYz@vU$1)q#(OQ@Eind?U`6k zaFU*j>h*ToI!@9$L9exsK*#xcGj;#64d&-vl)Cx3mh*FL0_Jv|qu1K^5trR5aTicu z+4?D|3Mll;RwyU+-?(ySxpI16Yj-;VFzIgR5`T{6q&di8hWpr08m@|U(rkola*2E@ za&R?znI|js^H!sK;rbK8b0+vg8PR$=yV0Y{oz6Z7FDVr@s&pJh&9S1AhM6M`<6?Y- z7&S-vF&MHYVKL{V{)B(mNxnu-{mk5sc)Nai49-1H&?F>6X#bEJ000T^RW0lLuzEby zq}@}s6eFx&4Rv`G!*)6kp?cg?|Mh}+V!2A$UcQP_g&1(BL<}I4} z&W*WO`)WVEi|uv+6kouzXc$ne=Mm@yHm`0CymhC;S$sL1W$YT8yt;=dy%SFD^>F%3 z#IoT{wDuiTW8wWZd*p|3*4lkwZtgnN7geimZ7uwxfj5gO&Aq!`x`~;IDlO&Lb{MsiHfLA*G8>nxSi+6>`lpI}>`@ zVnyMZYG&u@qrE(+NcD$zc6XFkx-LU$r^)Lv{!`N-EwAuXVMq^vqaV(Of5!Y9H?n>b z{A-}{Mc0CDcj5gu2Q}kHS4OqA=m*HR%si6Q(4Ix^Guz61bkC@>Z4c7n-Tncrk|wW`*1Qa+fwCk_SuND*Eu7z zFBBWIFO!G7)#a<42M{JRQm7~6%TEZOnC=VxE6@h3sZ>jJ3v!rg)X+KqG@kRjQ&@tS zaV9o*Zv^-Yq!xp<+smCsj8X+GAnPI7XL4P~tvFOW@N6`5eFxrxkUKBxk-@8<{FDlC zjG%%;onQe4H2MGpYwZ7G?!DuqsinOsOei$!M)@O=q~Z ztrjV28+2)#msvgofvSTEccrLK_nFEzo!f=5N$JaukZJ8%cTb{Msgl6Y~jt;A2b`08Kgy_YZ zJjBsqXJRxsC0;i<^34GHZ%o-6iLm6}NJOOQjd|z;-5WP2_eOnGh`n)w^|hyj*8K=r z=#5&{7ZFZ?sU#$K$z~-2OpAUnq0_Z!_Z6xZ+FWkhdY0Yh8@$?xR^0-@!KBTJ931nt zCi~!+M$cM$)+5*fam!%Dcuv)G;5{@hW^p&qb}a65X%zB?A;`fkC2gCdtSjwg>Zm&uwcF)?AFsqHAzjdFFkn0w6r!jJLEaX=F>uiZik#q%g8nkuBiS z%pN8t+01TNkAOj2yWh0*858qx=rZi?2QASp13ZUNRtT8P^5t8l&ZFxLFn!+5P9);wf1xM-Yi>$_-FOp}|;7c~qO0Q;f9TCiFj)(zGPPl1)n@B86!wm}i@oX^17Pw$o7| zT86`DyJK2v)k;G?L2|tPjeup_->Hlz8yk%E7Paj$oDYD1{`xo3D~F_4wuy~73abAx zZ>^dJ2o0@;5QK#N#wzYNx`E|x^-69rrh=HFp}U-hzLDI}G04M))-kVbLsv@ENwM3A z%HZe&6xz}gk#Tsrn2HW#euIDhm#<+EUN*x-`yB$ue+Y{p?w{QDAzMzrk&SV_%@JP8TOo7m9{>=C#QvA7YaRfkX(n^D2McBRe0kYx5R zM;TA9xrIfHxw}0Dw%rMO8D4q^ve407sj)*(n(zx$B4&@G3$^)~%O5{umboBYt#_Aa z__(^LGtFW%Ns{-2U#k|O{oFxLMZx|veD)6VDvkU&F3?6@~qMxV^N< z8*MWd<2SEKw<7$d>!nMcwXCNmK=X~D=f;plr;QD2CK||h{NFHTy<=BC>Z`c~bf`iNApB&2UK0KJ zzM2J^JQ+#8^IuJJTVKsHh}RtAj<;<{5&VB2nnOAe`?yMB;Ro z10+u8Hj8PPYtVMSjA_Cgdic@2BY3Hj=p5@qYrY`(+h* zZDSvcN%7i7t;z+5nsl5991*3M|DhMCNeT16l*B|Wss=Gmi}dl(a5P!p3$I2jUXQ2< zdzmd+5wy1SM#ZQM))9d0wG$7*4Y?c1@eA@)lI&5gS^^;bap{|YXEA?pq?7R?2;=Bpr>)9~#=h&$4meSu@BcX?^3Yn2yG zr4` zSF73rLhDzJoIJx3i?>Zp!7%P6=>}|IUj;^#mINH)r09##ODX!Il*Gios0J}bU-VjP z_r*NK;`&1N;Cq?vWX=Bq6%?g?8K9WbDs&&o-AJ~3MfdDfk{qS&10a81Q!A}J6O^az z_#I8MW}0uc<|57?8cl)?RAf;ztF8K_ijK5+x-K7hJpc}Uz3NQz7!tDQ(z7dB%rwVp zaHirJA)uI!TPKun=13RkO^qbzF9 zVdgaorI*Crrb#!LFn&o);>MQ5TJ^FKdV)gZ)>&KFEh&X2abrTYGDq+Lh04s`j6$b| zuEU#ZM$~>2vd5Z(j*g51!gXLOvn5X{+>irM3hcKgZ~Y=YoDJ|g%@ojAOvEL3xuUQO zfjy29+6Q6eTNvCGKY*|T{1smyvRec10+5d)k87YjhKxY-+>arrBJFw%S*!krOnkyv zXBf^C#^4H+zWh_Li&4Z;uvJOTSVU`v;bJXsi9V)|h5ioCfK)oR2Am`v8;r`yJuaE4 zb?gLKNjesJ6%}{y=k!WE8@1F1{zk+SbnJapkvdkvym30V#q{?TCXCmyByLO{YE=(3 zfI`<$Xq=AO!mds!G>ID%YL+4jG&(i|g;2-#8c{cy(4}LS0zw@-!EC8?Y(D_hu}@6i z-)$Lg9eZzu=-9i!QpZYPOVF`XRvH~!fC!W^1U;aRRcoN=*gnMFI(7o-I34STOw_T+ z>zd){*h5T8FqTlz9&)=Iyr%6=IcMF}bZbV!68^ZgOu>`vKsM*A@NB zn8Vs*LZ^-^S-@0eA;qcO#kv4(lHOKfuVO_ejV#$GSRalPn;B;Ia02z}t&Eu|() z>tfyNGr*R&Tec#yyd6+{yQL1TPI|lLFo?--x4eaX@^;G!vg6-wNryPT#}dIws{MA$ ze_0=SyQRWK&AkXz{4GBpS@?F#8Zg~&w;V<~_1i7hTi6`A-)vgGZSQZOOLdAp?= z>C|tx>@{MXw_B#0DpkFVD$%XAen>AL8NmSvh?P_A;g$E1@ZROGpk9|m&4uRAXb7zA z0X+2-35df3O+D2bacndk2TRo>L7>;~29tGxvU732>ogmHe=M-;Bkw!7*;j&1r)93U zn_}!a@?}$Ne%VB=xEEtaaovnI3@p)FaT(gaVi6b_s}+vKk5DA~XVGPdSj&IKm7NlL 
z8=c#><}E~5UcCXjg*4pGt+)g6i;%HqFCxHmg7dqmYQf8QQF{rE`z~sEIZ%9c>?l#~ zYZ^dVSMjYe>m9x|R?~q|Y<1QHR)7lZGF86agvinhhgi2-agce+rMasMB&-@+y{m7-^@p?Wza*m#||d{|7NSPGu8$ zuIv+$oz?i4$@WVuyHsPlx)KO>+V6(rHv?$ewwfK7$a6Y#HD%j5+Qf2-Y-q=m7;$a6Y;`3t6y3J7%a8^ju@#Xm>KD|Z0_`>m(E$^h z3Q-4=+guz*+E$1^kkM9%vA|OyP6%EUBJx3!LRxIgGw-_(W-|HdEitvZ6s~Ta-j;yqoO7Z+)RrLb#;@FxvLXn`?#1X0=;_^-& z9WQF1iq#-pPrSU~+x0iDVB((4MEMO>*WdWsuYT9ps@jbl&%|=fi{;Q!$uW;PuyDTu z@TW2G$QW2h1w2y3(_I>JG{r;@22mmX71%PxEcz=j6PM4rmWkWhlYJBpIm01`zHyyT z(qnRlX_+Etn8*SC65zYU%I)bY*Te2I{F zdVsU~G+TY-^aq|1mRa>*A;*f;I}_S73DF)EHpuneXKxQ#R-_#yRMjBbc!`aYEt809 z@KLx__`*vDgT`!9E_K;;o_)B(G&mu}ku&(_R9(k#3(yhn})PJKI^RZ>! zhV)c3CH!UD5Dvc>hHyU^uQ2imA~}~}Tm8jx9HC-D?4vDcUh^+RYu?ADR&|6S`314# z;CS|XYP`@xkmrKT7s@M;5#$wop?nD>#a=4kfs|~!j}qeEb~FE_$j^;;#fM;;*UBG8 z4zO-8tU4F#eX=&Fe}TnM-5U)mLoINcWJ1q$y(G>0m z?c8l+Qe!8G*}cPX0t}*FS2wW2jlr%L<}6;geA3 z*E~C8*M;5eI; zaOJ9I4k43y8HdZF3vk)2IF*yY160i|#9hnFW?a^*M9fz8An0eJA2+C)qdaXAWN{i8LfkXVL`e4$`r zKKc9TFBEF{g#!LIsELq6H#+Z_B0n&pzsOS)0v(W@fY2vSsI*KnwKSo>*i$nd^v&v# z+7w)$XT`IiKVww(IHb^7PCJvkhb^VVQ?pH^{2k~dZn|g1d!Xm9S15bCI)Ro=MvKOn z+|Qd3U2r~II}4Z97pDL;xTof70%~!oRYM@=k!{#@n?B(UX$?QOKyzn;N3Sn4jOaOb zMbRURK?me6K`PCX$u@G(*ytD5FLJw;>7}= zf2py+4 zQp5tkMVz+4Hf#FeGGs+nVd_t2ffKQ*EpQU*B>Y4oDCJE1?KOSC0O9&B*6j?0E8B}8^UA@Jjn>2W{;J& zz`2(vTj2X3fEIWv!Xyh^2V#;1J`7^A1%8867{HCD$So#xS>Trc(iZrzVg1d9#981Z zuCX_>PPnD>5*TgE!qN4H96;4`lc6Z)I3J)E_?hpW3oL z3%aIus68mX=~kWkADP#vB?IsPG@aR0q)Qt-+S=U>%3pR3u6du6e66}D2xaw7T~{Sd z+NrKC!{PZb@8DiI;{i*l(w*v6;QRgC+pYm#L@y9&JJpjppnc>0qNj^CsTj2Hyf4*< zqX!!&dE2LrZsNj8-h#A$v~Lso_S- zpY5DHTF8%Ro)&9_)b@kQW-?F3>c-eDo|l(zcc7~11ZLa0??I&QQcuP3wxG;wJjAA? zRjh3a%KXxRO$k)A8Vt&UiaeVVs`wE%&}|pK+~1}|ayFy8+7_xO(S#>UQMy+ZH2o5< z3!*t4u4S8WlZSLpUzFXpO5Fxs=!!IOn!cQM{Z4HJd*D!^=d@#GzgHFAW27?H_6PN1 zZj7{&)%{U*ER2zwvcCUO7rIE_u);s7O0>!rzXXcV_Gfh$`f8*uF>)T%d#Cybu-z^7 z2ZDpFqEfrm^chuFcdr`P*`|4Rs+*eu=T{j$Y+BkbHG9S~t9yMl5(B6626n2UXjAt_ z>Ql5((?UDds`Ei>teTx@W0%9c&sD{{Gp(HB(~;*|asgRXq$=_m--Q zlh;zaRS)Fq9#IRSSz4e4p9;f}t$Q2w7fc~TOxvx_g06LMuda(a#K3O#Dh9WEq59S- z#|rIMorvtLE(gzKkL*?(SbBH$6ME7RQOz$fF5P>nD=_++xNoO=k=@u=HO5G5+Nj-X zHxk|ZtKXsdnwF*_J^bwSI*dm5(W*}o&OE-BPT#4XM!ma_RdXB~I(<2c=sr<(!Tiv; zw4I9Iy>>rC4MKG_&5Hbjk=&)`EU>ySRoz=z)+Hh{fKI*uoxEKA69c4ap~w_eV3(@8 zV3E~*i~1DmaF4FQ9<>==+`s2usQF%*mKIFjEVRFWuY(vkimz@(bIkv(cVRHc7E{^YfxmF`i!Fbe(q zzl8zPw6tJFQ<$y(#ecN0NuG)uBcPNNU^EP9kJ<)y{{eqNON^}G@So6_;%5HbKKKy@ zY1i;O8iCTvzXNqKlqb-c#n-0a-x>Y^hjBcHzmJ|NzB#=yT4XTqf?2E@&FW%Xz~P4S zdJ-i7o1iVHjd>oGbW@gR977%BDPLe}#ZyjotgV)_JfjzSPGhW@ld+ky`cA$n87f0bZPCTr2?ybcn~Et6p0BnjqMOR(Td2^Kye!D6X)qa{B} zYH9OyX1}mRf@RYsSg}fimD?p)b%O*o2PIhjk^~ojA;BdIN)TwYu9XCrc9-C?Q4(xC zUxG~+Nl<&01o!Qi;QrSoc;I&l9&8T92{d}Bj6qiaDhY}YhnOmPk3rKRuRxOmO^3cA z!7!f$qF#ZfBfgN-$aKtrK+{o$5}Yzzg3)s%7*i|3sW(Y5_F)FiMjV%*?n?=-_)dZ= z8$eeB&9)avaMc+ST)jks9lIpB=AZ=EzA3@ZA0@ah9rGa2Y*z~jb`OzY&v_DDzgdF4 z_e*fYF$r$`T!Nc|(8WNrn+qklWw-=?nIpmNnJ3`5*++hf`?mR;slxV50l`vN(tVn zk>L2X5`6jqg8I!GsBTz~>NjhsCQ2AoOA!W|&$&i|^PZ4kK|h$NK=Xx@B&e>I;QY%a zShQDyC4ZA(>E{w$_`3wlvta)M%~$o1pk}ND7o9J`nkyw(`=A6DKP|x}A4;(9M+w&F zv|{#4`%7@yITCDGBEiP1C3x`*so+a9vEK+ZfB7S}t@+Uvk~;Rgr2c-NAYVHw!Rwub z_r_r2LGs6n5$L>y9>Fs^k zJ|d55G={Um_h&|01cnPgP8YfyistjGZ(v!F&I|#m2=c)~H(mym3X>PELKMGkIvOmS z(fC?06v$bZbwVWT2G~HA)r&(Rkwnc^V`Uc;FEmFam;Cg~Lpp9w4ynd)fuxtR% zNP@3JVkEc*5)NCzWhK~rs$d$H#5JB^S8PGnqy-0KiCPl~o`IRaCKNoHhaeK{z!Y*^ zN+`1Mx19E=;PWW!h!xz4K3L}oeu-XNvnI`IsQfs!xQf-88>FFB!FfPY%W?$Nx**C#hN-3uS+#c0S%R3OP zkNQ}_o&x4<^aPh-00_%|1h6~M52|T96tD8ao}6aroV@0qV_osyDaXmHa#mmf^ICY? 
zVN~~MQszy_sF)`&8@~oP27RThW~wa-O>KfRD1nyamH-iGIbo#)6VH|4oQoxxu}6a0 z4@)rT7=rA~dX`lZfzakJqbt(7?nn>aH50q)9qAw9_6XX6b*mpR)6n&*g*wv3vg!>f z?}=Xp!T_npyDD%E--s);XEmx_xRDZl?a{l1TgD>VU~)@9TJlaUSMJnaI~{4&sxxMv zXEb+B%JOr;4EX$9B?I-$3YohZx&3RPQ=WRc)VjPm3zk@EzDVF!By#U0_XTj3Kc6e5 zx*nqwoGa=X-V1_Utb8i6nqLau!ilJ)(QE-Qjo-YaqlyGj@t}+A`Ja?p`!g|( zJU-tU=*-Mcl<0dDm(02`knPJtzRYbBrBCwA?Gjx_tZXcNcc9NRy4(v`+lw$#{Ar4} zLO~T$kQz!?l!ws^b@U?H*i7|6=yMbu3Y%mov;!3iH8IJYP)mZdOfnK`0~w)glPn1R z4fP5&HOX$F0u&v}G0CFPa&nuQWNBy|j8mwENsb8p$z)5DtO(t~a$A|?q)>o$&QmvG zl6qQC4SmIO^VRK=oE;iUaBI_|>d^P(wl~Qop&DpVsDoO8+IU(o3wc?q&WgI@ZM{Z? zs+qZq+_HIFU#vo%S^_o`44)5A^rHWeDg^azvQr$XgSj}%8({H4#(;lW6sBDMFnKCD7{tnU~l z51v~;tt{W=?J=$cIj-$aAA-qOrOraUJ!fN=Dc!9=cuc|567ON9g)2H7Ks-Ed1Fjv0 zVA~NMPoP!!aBIX*HA+y}19LSz;RbL#t<$WI-y%Y#@3fY^WVLUO#&?<06ME93@EI&E zja%y8v;bLv~HA(XA~wt`qHt#DH*Q|vkX zJZj-?S1M$68P9RJw=;*lEp#I%^iW|2kf8%w39Q0g6cswCbs$jqEUFcHRJSLDip)d4 zNPxZ6kwfRShE&$CBMrVo4O_lq6rVAPEzkS`EVZN^(ro9hB}`+XZ0J3%6Oec z8D^9_Y4v*>kl*&W>D*eYcd^`Lyz8dtbFZxqVl~Nl#|`J+TTR09mT^$V-xj}_^sfs* zKjabyzfw}32N!;4J(s3A}R;l zjEpZau-bO>u0-wN*!|N8bafiZ(@P2joua^L*YFsd2?19E^>$ z73E!$dIF=8aklB$SUaM;cT#(?LTAh}NPd8d10zW0gIccEi)UqHr5iV{~d<;UOqd_E?=7U$~d4 zaXK}j@cr&cP0*={g@>9Tb(&6{R@eh(Ir|))I=%3U&PYwxsWS>6qLisRHK}kHQ`2ho z)w1yFE=bksRHX3l1CY9M3^df!x}dNDrgP4AYLcgQH_P7ut2;J~-O{*y^v%k{UQ06i z&O-HDcRh%!pY6lml5Gq4x~fA^yPN~xumke_xWQN7^a9~=iH3g#SuU1;Z&TpY^4k!y z8@inHB$+Bq0wxR38}iG@s?P7pe&>dt@R6nY}Kf+LTKTeCMG@ z!&A7Z`?`1~3+)PS>1>~uWnu5B7cckE^Ly=VnHs%F)Q@g9)u!Z4QG%Y%Ss4Ogz z;nQVHXO>9mVe-&59D^uNc~pOxq;M~Tldcg97z0NyCM*K07VcsUB`FZX8}wX}#1kU5 z4-}0IT~vd``fG&Jj_z(azyr>1i^DI z^(AY#HDwKrAfAJ%w^_?a6KiP@JR(!)aNsU?BV^@^@>X5_N%emynaSfc)f+pSaFH1R z&gQV_uJnCAPukR5xXTT1aZLkuk(JNmH+3b)K@qq@ctwo59-vH zFty=JlVtMrPJNya-iI72x6CMy^wc(LRFPLRid`oE1ySzYAsA` z_=<#5Mc1r+9uBGs*!A$W2~{=#9viAxsID7RN{Fr_;2Ktbmb*Ji77<}Hjo5wpJa$x< zlJ!Zfn!5L*PmwpK2)qK1Hr}F{Bx3@x@aU(6OeCbQN8-Bo1>(P_HLOxvJ`XX~*~FYk ztgzssJl<5(2;YhN4J`#`B^Xa1|jP;7*mxslyUr!6)~rA z@Jc)qcdOZT(1K7}lqc3|GBLL*U7S=erXx`vW~)otxVsfMUuIgz5S@9>{T)m~phZ>T zKy+4%>3WZ3^;$GzzSUxePNi9$S?Qku47Hf6f%EkC$pGi;RKV&ShC%#IfEQ}uGS&^d zJS}m#P6e!X?MqSIZ=O8hBZ0QK_U36j3)E*`JovJr<3Lm4&VVj6%0LQooPnTElt=RF zKCIm>HY+`2xgJ_kp3$oVq+j8>U9h4&v{#RkzC*Ff_AMPx@zqYKNQ;2ib?zpIZ&X)p zN7y#2FOT|VI9o)#(!+Xt!kSx|d;p;MjJL(vxR;|SE6S$<>UJ2g7Te9erLH6&52)KQ zfGwh44ulG`N^-HHd}yGyLit+E@lv#FTY#HDGHW5c#RYDY?Cb@sD4#N@{aAlm%s1Jo z-g?U>^DmPRAk+|CTBLh*O?4xsxzf_^YJE4bztcS!;AVte>dM$KZMOjde{ zaS&FN4}4T63#d}sFvOUdeEy@FuQyZaR8^@jQb9@&jws0fW-tfzzDRP z&>aLVK`IoU0s&$^tN~XM6(DNzf3ObO)sqn>%qqE;h?bam?J5WK!1rFDnS!hVeCPt8iz9vH&*a}3dbLK6w3AwyAM4z9V3=JGjD?rkV4 z5;%tIpvhRG_+&58wx}$>6K=}&OJ5`_HTa7XP%;#esFJtAvruc*U=)m@8uA9K(iD@x z*H~p1>Av==#?zS78uE+7%~oq6g?$U|d~)@1)ez=YPqu`B=4v&p*<$cw0(z6X118fK zQRl;Ww8mh>xD&{IFAQ!Q6#yrWJ9Iv|^PsuD0@Y+-tYB-%Rpg{$caB^0IT$1ktSwb* z2zAOD`WSI_n2Nr>Dhx%5$4mi4pKuKMlt3@+gl~{~9fl@ZAi>s<`p|0i5!A&uT;-#o z90y0JF0&IMn^BmrLLEfWFukIYGK)pZ(6NM!?+D15st0;C1!NH+^9h-z9>&6AL!{0; zw6TU>MaW+p$8nua&_ULn3N8L@L~;p*MZciwm6Bg3z9nXVh#Pn zfviTua|pf;md)o07=c#t#Wu0vF}ske2&!3pox^z0W~_!BtN1a8@wA(v z6@_)Pia&KAucU_P`B_}Qjj2(=J2uW4R|9G9Kt7Cv=t_=qxS!cv=^YNK7LYlEkezJx z`*?mX-DnI!`DJ8HMDO`NQ8y03S378*Ek@c}e!GMCTD=E-b|Yk{%3pL4KdZ|I$0J0s z%0DCGY)k=Pnr9AlkGf<^vC4h4w8G5rya78xEwatU@FUjcoAW6jV!8YJiPcKv;NkaK_0*+(- z^Zn>qg1B3CwQ^tTApYkXr+24G|ij*oF zdvo9E-XKQGRvc+t$t~noW6>!1X)MOTU5aknF&p4p;_ZpamWXgS?PnWXP*44oP$O5< znq&EuGJSR4@hJ^XnSf~!-Q4} zOgHrj2llWdm%x%Xrf$nd)r{6t@|e1Hx3%> zu1E~)s(FQzp`$MwYs$$pSmkRSY^kpYEO~0IOM&(~8K(Ot!$PFa5Zl(3f9zyf=sSkJ zbm|OJechQ&V4HP&m;2tyJ6Q&&_{xZtqo zMrfsLh1c78M{%qX&AI@47J9JY2esd|nzJrec^`6rY=9Z`wXZ#FDy!mbUM7&)9M;wM 
zlkX)Mvp8lonM<*Q@crSNkE*0$)pv64BJ(n+bivD>L)f6ZE9kUVnq`$gWn+EK{dEtc!Ee*1y>N(4@%^F)L)D(np>Kyg16eR zf&%ZIrSY)D9(>8h`rh{6iJh-oq=U7}e{f)5`JW8M!(0_?0UM^yVh{f6zb&D;t_lvf zVFd%cWBwNvTwr5;P3!H16+1}vEOKsXt_pr;!wM#P zD^I3ZMhE%Hl@>X^-u2LKw@6nH_OW3FbG(D0Xl|IR2PYHO21~JTQN4d-h%bURxOd*B z(A9+9@Hk-W>YWWM#}hmo#&%fcciM=88t*RHAUDD>Rc4+3(7|r@K8pCsu~NLLgBRsU z9SZh%H=;%ETx_^i-qk_e?R^8c8g4|a4uTz}B|YXnme87boG#kRW$#4-vmrnCBJ>bm zC6McWT#EwwN@kJJ2R(`CU0~(-t}N^b`M&MUkCU&$E$K*IrBi{zFPb2AHQ%E4v<_K= zF2Q`DtQ_CYj7<>1)4P`Z(y>53m(*uzE%~`*fqX8>7mAf?$pL=o%a^9>J}h~JOm6youT6qT?gQ+MB_EL&;LYJ% z^RYa*c(NLrB{9GnN6=dZAptODBO}M&Ao>tI27em*NN`tpI*`*T^F>k0w0@s3& zOKs?XE#&p|;e0eYbaXvXqjSnox$QE7)@XjL8kLV#HARi)$Es2JSXEOzqxrFFR6bVK zl(f-&g(oT>t7=MMG(T32%Ezjj5*qy@Y)@1^R@Icqn96zc@N_4F@zj(-4Sq&vslhEV zG|^neit>y$XDsWKuRI`=#`pZ+$pNX2>eDPn+Kk3U^{21b8`P96kou{MVHKawl#J;Q zL$h8)ZIyfi8`Yn{{%w*ltyAYr$0_xzfGB>D5nZJk-$PMW@mCIQy`hC40LLo!abN|E z-!l-fK1MAra2R`R26O0#QZxrv@hJ}E&NztRG=wk5t6x#~=ph5KpDbKT4$r@$&)yfn zkD;=jrUFtk-wo*Rwk5O97G9NK<1D zt2IuYk6w^9A(CquJ<*8}o`%0pkHaTKA#Rk&IcyD|>_ioVEZyEvx}u{pk0#Sr}yMK^L-W^PurBu}!19G(#gpCag)F!TmuPHg~88_&O8xizLcfY#7w zh&jx$Y43RfckA(TsA<;F&xzqPo@k-x*-{sV>~M&3#;Yur)5G&o$i*j1cQ164iAW$D z*W6vu&W;>R3OL>B$g1%FbO8DiKzdPNFr!NljYt&ly!){V{(^oXoMgo=Xbi~{>JQR~P=O9BJc9?9;`zWTd6xpF$CJ63(w{}gceo6n zuMw)O%*_sQqfHEFfMu26K~59Yy2B0+zqinb&CMlS`Ez7mg@NgCo#(GuCG~c~F#ky= z4;4CG@9Bh-+P2JN3M}`-AZq5V9t9>pOEgTah~=$`_yNmJhdVug#k7sfKFne6_bdi8 zIr}Ukco5&=A}7d>HERra}RtUD0;NpMjh3&dP@Ej|%o zR8#e}I|(p}fbqM(Kz<9Z&Vl@v{g~>QC#jARriubW>y$o}EPuMN{EbLF<7p_#$lp+c zKXcOH-!UJ;jX+kcywcj( zYX*x^@1np_WY1;qoOKCj7T z#;0Jo`tCa>r48qtLGboDFrx!rMKf8f@`PHv&QMFW&RIju+o$7Fb{jB*PzzvMx-elj z%bKiyV@ri@GTY5MixvD5y>Yp>g|vbfwqjCebb>dRGWpe?=j``b7?EhzDJ&^G4p*zV zJ_KlbW1Df(R-Icw@D~)4t=L~ChpEk!O0u_8RU$ud3_Ky}Raj5F6zhd6*=o!8fV@#yIUUqy@ma0Pr688I z26X1EArxR$ZbHD@e%%J9&^q3jS3ZjPw`n$qDK)!ob8IE_*T{2N55%7Y!^&NTl5}l8 zLz*ujXn{0{MmuF6u4TfRW#!{!DG7>YZ;H0-B=yVR?`%`nIY**J+ zmI5%5&5$9KGOWr;Ao6K_g0#wN(D|S~fj;+c(D#Q?J^R(dqD$+<2ozW>h08i%BlcrM_^5S@bdZtU$=L zvAYS=H=~Y&!5GHKXhc>rat0!M7@3O56O7D74I`9!h!M*CfDy`6TM?nmHi%f2t09w%scyb>wQhci-TV?lbn{ErA)_> zUy`!wVtp-zSdNr1${fJdrvgC`hRK5WM-^tRd zF1E;}>XqVK0>>j-j@VbR;J%N^vCXhHosm)4N+*=L5UU;Odd^obJq1rF(S&`b>mHhf zw&QOKDlN^GChS0JDR;1{8&~Y_k<@mi8}pZ3iC^B?4w)YOzitG*Z9>&&SvJVaf-LjG zH*saJwKBzYGok9W>>7}lKL?AR$V=|KzT9m54bx6}b8J_KyHf1wL|EqemdmFb{e zjEdRfii}v_2WnX@=+-Ls>Qv+)cL1KBp90)fj)nkU%(qH;^E#8unN|JW zsn`U=VwW1-{N!9cXfLBZOBZ9~Pr&N!W{~`CenRqjCPy^F+-}1+{{vumX8IeX*TQ4mbj#m``)=>Hy45SL9gVI=Wlx~x{_e}JLv-0zUe@Dsv0|h5 znHoK2Lf?{l%bq9X5nlG=f)V@_a@gmQ&nRc*5hOWypPD@1*m>gyuL9t>!E3pLgLh%Q zG3W;@k-aACJ}5Dvzt6H!AX_U21Hf9ZVQGC6YQ0J-P0^_;x)oVj(c_YJe~EU?Gn1L zeBP8`vlgZT~0#&A{0o%yzk6*mCJhFD*Kx@_foB-!t)jQDjXjPLhSfaB^r zACisTS)&ELYy>@I1U#8i(7r@L4~ifw_h~d$Tb>3LSmMzGdi9W2T9q9^U_#+M*)VmeokEQ!99<8)X|WQ8)6QvJDR~#CmoekKG4``8WnqwYl)KaX5#Cf%?`| z@D~&Mn=k7P^2MqS%UvSVmljyIk89!0yzG<(aXF>fxjkoUJ&%);$V?^0T5%TYN9_rH ziTbr&mWyZ&m*Y~T{blYY_`;h3WvM|^T2`u36Pc+>z3^X<)D0!IHWIp-P_C zB=Y>H#8svc%kJsG+F6Tj5$%_qXNySW`AgzNoB|Q6fVH!h+9C>;)q!kXcs>AB(^C!O zbQ{oV*^3}AdxEGHjkyTsNSv85o!i>)>kN9=aM8 z=Gh?L((|;pbWKa!B;L~Vw6}Ck3v3W?>3P~)x~7FTh`01S?JZr?BAdipdY<-{u4z#C z`)9+~r1qAsX=`f5TY8@Mmab{|q+7hD=V@>0hUlpkZ|Qm3Te=~p)rz?rOE<(&t$0h%)85hzF;Xku((|;pbVEcn=`B4^drQ~EeH+AEdY<-{u4$uc#anuw z_Li<`X)2elQSrMkO&~#gOaJ&RGqdR}Jx_Z}*EDoGy`|@AZ|Rzrwn4n5=V@>0nr7ub zkCEJ@Dyyrlyz>-2))k#?MzV6r3@dNGIu8~?(>$B#RXtC8RoA2d1_NHz^R!oWO$+7H z73(JPs-CC4s@K{w`E6R>HuW>CiKclri>LK2;%S{ffDKQWYO{D+?;@Vo4GlPYTJIvB z)(tIfvv^wXBA(U_EwEWUt#=Vm>xLHEES}c8h^KW!i)s`dtx}lYB7EkM4#M8Q_ zr3FXR(|Q;2w5~~>(Ko_Sb`ekO2DDi`t#=Vm>qb_vjGorJYESDt%)qnxI~swaJ*^wc 
z|HkwCvUfWpm-e)-t*1?)r}cPBpfr*4Jk-sN(Vo`hDfF}+PdT-HB88sTHOAU}D(<(e zwX<-*r?NNWRir8`Km4oIp?F-s_YdQ79XBQ7aoq=x>%Pa*nA9HEea7Q@gKMxmi1WDK z;0?&OY>(?p(RcK??#C+@eV7Q;>s!MhFzEpa&N?Q+r?qU4j{J zL3IKRXHkO!4QKx@!JH--pg_a(I!Q2hs08!QmSFy32^L%}!NLO)EPhRbCI6LRX{&T* zzi^NQ%VtTiVzmS-cSx}6W(jH@kzn;v2`>Ixf=kk%41tF0A`)EMOM=VBNU(8<1e?}M zPUpmVCT8pz(> zQG%<^lHlqKCD?Jj1lK$w!L{#5u=8gLt_#7S1+sVLO0c_Jf<5ykxSqx?kiD0F^a9y8 zye7eoUrBIN26QrzeRF3CZW$%PU*<`0`xXiAd_aP`-<05at zIKE4QPY)odpWQ(9#Ph}a*$veh5(d?Bgn_1Wu9M(Ae%~Evx}c;5gN0{HP`y}!^EXMb z=q3r4yduHUuOzt8f&mOPU7jPss@@XRjF;e|B@(Q;T7tC)CAj!G2`>4k1nYj5V10{L zlyd0+2`-x=!G;SZ*m$i3FMcf*e2Jgc1)9G63ES56XpN+fS;G7KgMxhRRS912D!e!7 zJ3P?z&2&k#hDb{H|94EPmHp{f~avj{xTKyWYz9U2moRuAd)4ry9TOt+e0u>6(;z zFEWbX^;Yz|-fSRyU;ABeMZfDpa0VsNY+NZ2fo2niOE9ref^$xlV8(e8%)V5DIlB>L z=V3TYSd{j=-r4wFNBGYMq8)JkJRBI8-}N-{yY9i-s2YEUGe>ZKz< zRxL*tLqzFtT>0qEakf66O*)CQ^*WSgJ6or!InLH^M584Pfu5rhY2mt*3%oC`K_I;r-w~aJ6lh0Vv^d~ zdU}>gYG>=|*(Rx-t*19NN$qSsJ;x-qv-R|5CaImRr?)Uk?QA{0rAcaM>*=jbQaf8u z&r`Qz>*8sxovo+mtGgtrovo+0HZ9W5*3;XYq;|HR-a*x%BW!2u>75mI$7?%VPw%3p zgY7t5Pw%SsL$>2=J-w%TQqsoRdU}bvT=2%(dipTM@pYW7reYi;*XY1)B zOxieGPamnM5010-^i!nvjvvoVAovr&V_MGi(-A);2>*@D) z=8%iC^>poQJ^g@I0_|)){h-zX?QA{$QQe*pDzXsyA_4YNM-H9S8d6!mj@%SGRO4(t zIEgLK{0S^^wjQLjb*7Msd+B^`n#I|A@O2htm{D$`#o2oBZI7GIU9~t{55DWB=X0Aa z&entPxZ&J(i?j9MK^cErd>!du7g$#CkV{OMWK@g|iE@uF&enrXJTCJ@>AC^=+`fym z_24hkK9>oMa!)VL)`L|ZZ38(dVx#m$`XI_Zzc^dRZ%QP{(1k>KJ0Q;1gB#s7rD@V8 zF?tRWbhaKm>_(7`w(y4%bEQGNl@Mp^!L1%!iu*VMTeXBhJ=?y`_J$R?|c|LE5 z#MyfAU~Ht}Zl7d2TMwRXIyTmhC~up@*?Mr6DTi70*s@9Riuo%cbhaM6$z2t4!{lLf zT2s?_&n3>*gLfEQ3=-v?m^fPx&P`d4mK5cknmAhz-s6@L6V#bEapG(}tevfgi&$Zr z5IS2A_tPouY&|?kr?j*6@Gzay&ep@_I;EYhhllHwcD5cKp;Ow~dU&KxX=m%nX=m%<={lvIt%qmolyXdf2p0!D*w6pcBTAk9)*0VP2ly zT}c~ZJ6q4%PE9(Av-JYWD9+Z~cYOqnNabuj>%e#Hfc*Nnw-;yYS&vIJ{5!~Uv217S z?Fpf?^{gk!RACY@S$NctKa@;1;jg4?PSdfH@06F0)715kY6` zSxUA-DohF>8wn91Jed$@>sg;kO*A5(Cl%ssJ?kqsnI{+GY(49Uw(~4FW(LAi197$< zn!-ihcDByL4RN*}YNxoBFdN)xThi%lJ@mXs(@8-Ex#NtEA-9a&PdyS3^Q!m11~&l& z+u1q~OT^iF=nW%`+^`^{+sUM}_0ZSGypf`rk-$#GbBT`fj76NShh`ca^P9zoCoke` zJ+#2hq$F5C9>|EZb^OE-l}m~Au;KEYMx3pOUT`;6J6jLEqG{1mqUdZrbh>Qm^eUr! 
zm^^e1#~{iB9&xrF>Sb`!HDUodGGPHn*Aqr(>-ae!T_sW=WD|m1k;KCxakd_+bW7rv z%~foahezUUJ@jUB=kiENoUMo6cQLIf&zQv7dT6=n5viqA&~yvWpTyaE=tcvPaWPyT zONq1f&<{p1wZsf5539u4dT4E8f17oJ=UC!wJ+$v62p(~Xv-Qx{lr=PhcmyWS)R0Ax~$?#d|U^N>xPt%tU_rUARi z%IC?OI9m^0tq0iYJ}aNcapG(}v_FZ*Gdgj$9=bG1CJ*k!*?Q!I&sBCLF#{)w~o&=m=#imqAtJQoya>!E8Cs%!u}F%)O(p^Yge zL@y)Y8k|0bX1TkQWS2fi0&G)0PaMVBdgzl_HD$))Q(^MxY&}%q(Z*Z4fn-b|79Rba z5IS2A_4PJ6q@Zr8rv;ok*;(;G#U)6ld$9PG+a-lw-^e4?V@%dZ?Rc7D`vK zVl*P3=b_?kJ@k-kP(%*XR+J~D;%q%sXbb@-k`?9AsW@8?H8ga|-Z@(7eZ8A%iuAsI9m@bam^4IRUX`mv-QyKI7U8?a>dzt z=%^Mcb*4xwpXa;cY(4aui${Q!&qH5vwjO%b#nbtC5-iTvLtnahk}vuWd33fO@|!j| z1!~po!SiD}TW@$>PoB4mv-PIpZ2bW&#`wG*SF2})EIM0nD$dq^l zrAs^#kBz_OH_q01SS`-hL$@kjoK%l0X0(Xs*y3zGbhqN>D@ltw^N3rVt>p9ao z2eBs;*XTJjbPCSaS!r>$o}-=*fHL>y&Y} zp7Wb03Vb9`fNO6aw6j2c=EZ|AD>@N0akieb%qRmX$Z-~eK2e^=i?j8d%}UQ$u7~)g z0n+JgJ?9G7?Sd8MdA&GW&)K0^W&4(n2l(P_JtyFGox9258`V|Y5w^|h%Ts zdRTf}cX=ky{>9mP4xO!2lojQJ0CBdSv)$ZV>Pqs-fH+&viF!E@D$FX$#ftKIfjC>w znd8-V(OKzn6G$eVt>;|eHpwmoA25is^_=-8JJnm?!JmUttV}+85NGQ-bhe&w_bx5f z&en6j(>)mF;|X!Lo^z&0_mPvy%H%@}akidAXX`w?fs=VYQThIB(sEF=fJ|q!m>p9C!C&?-+ zlSH~8%7-K3Y(1yibds|UTTwnTQMX|V=5+8TPT44*pop{eoV{l5=`#69MVzhY)VS#G z_V5vmI9t!T#LP(1N(-@A^Epe5a&{4skRJ5aS1{6T-yd;vBBn& zY9?m!H1g;*u1%VHv6p2rn|p_RE-P7lIZ>yv0ReSe7nn3b$zZz5`72R$k=G`u>M$Ep zqh2FQIwPb$L%}W-D;DNGu~mG6sLNUBCW?RYTEZJMiB@$5pk`ds04 zCNJ0XQP%+p6#D+oGfxYoI)37#QHA3*#?; zU|4jqY;S~bwHcZ3v*iK&p2G|HQ#Xik*m)g z%ReTBUh}=$YyKecVgeN2of5D4UhOr%9R?%D%_H|c%s8+1njgm<-j7^*&G$N9^QB;` zypkMx&G%}r`5ahVs@4$dlr?-KarBz+b-dPj4hNyV=DQJk3A>*NI>z^Euldv^Q;Jo5%wcM;`P3rYT#TgSNAa5P z)n4=6IY%~ZZ!@*me0R>`0y60}->bdm7yBqy8gCU3bC}v|K3_4h%PgKoCcWl+wb%T( zoU0wC_L?7;a~GNPn(x(K^Ev4qu?NZI-ovZC=EpJLAd~BgS9{HOGe`YGCcWlIZLj$W z<7|y;iiKMsUfXMa3}O}cB9~tCz1nMjob>Tzz8isb+iSin>!`Wp(rdog_L^_H@NzQf zHQ%ef=1+$Nr>%F8NoPG??KPjvpJ8f?Rs1&xp}ppFA80GyEDL`l`Am^s^S#<@ev*ih zjXD^B_L`q0p|~3XbO_|tUi00Gsg--YgV0{{-3YDROB{sun(szv<-VGTwHTtP_L}cT z=u)0^5ZY^gBEn@iia&R-+H1ZW>lg<7!Vt7U3iWEQ`H3*AxQC61YOncj1Up#oE=N@m zMz8r^+iSiaeQ8_&^U387H7Z{7-7<93cH022_L`rFa5wE~8ynSL^W9ii(>}Ff-a7G` zp9r&x(@`O*m3Oar&37YQP3ukAkxDep_L{GoCd*k2SA33x(_ZtF#ObDPc3|3TeoC0T zy$9_KQSCK9DTAxg$8D@vd(BUYwTgq-iRmV3ulY$ZSF?ILSnW06WjbXsH@6pO5!Q7% zdP#fDPbtzWUgf~F*Zf3jR`E>^LVL|mMCctv@hgNKXpi!=*Zh>_g>{$vFn2Xpd(A&N z*4^)Y>|9apH9vJOSLaW&GkCSv{F7&}im!CA+G~DltZNt^cQRg)r z*ZkC2SIt}E4peuL_L_h43|8?N2dlm2r^dQ!evy+wd(BUsA-1h6zQ@U+z2>LRAl2u^ zDn9OH&|dRXXK;GSDi;6hqCDDbe(DUd&1m@$Jo^-{`BCjPpBb>dj;(Mp#nJPj`~v2n z*ZipVntzfUu|YWHI_99){OB$8n$HfwF_+VkdJ|Us98o+Hh-$C-t~FS{QXv_5u||DD zEWPH#eWv!BuPe;DSjC=h;L>ZpS9{H8Rh*SKkId%0`_f+X zHQx<$Rq&V%i)yd=ZdhUkf3~q+?KM9o)+)~HVVA7E=DT683YHT_ulZi>HQx<$RdA6F zi)yd=ZdhUkueGsW?KM9o)+#>iz_izVH_TPRe-TEn`Cjcc-wktBFwCPv!lK%1z8jWU z!TvVZtG(u@#9GC28Yv>R*L*k3Rl&W=Kp`}y$5t1)wTbB z=3dDjOIOk@YPO_Xtb4CzEEn7@H(8e4jWNd9V2lm6F>(itJJ@t$LN_%OQxXzL9z7I4 z2nmotAcQ281oD!QLP$sfLdZ)V<^TQeeP%Q&yp{J`>;HcFufM#OXV2MtpMK`dnKO4j z3c7VI;2WNASgH9v`r@j`mZao7P^tMrw?F|0(~Z>puu}8qa5F_WUu4YNfj^gS+${|& zHDC7stQ+fLCT{mU1}Zf_=;2RQ_CEbJ-&d*mpY0ng!}tAIO3e?(Vk4P}|K)oqH9zRl zQ3mmyEVUV^)cnu()9RZfTQBX7Mop7!A^d%7Ii7{%6jzTlP7(e1OL4jbrs}X#^TP)i zA3I+{%@1F#A*JSruQ@eMThxqSfTts-RUN*eYzdO!?wx!#J5Z?kx|b%@{DDHv=k}_l z<_{EVJ`=zvu)2{U)O_w^I8EGF5^8=}sQLQnwSvy3DMj0lFs8yn%@5ipjH8Xz{II3w zb8F2p6Bbba*Fn%*YQEKPpq|wHu%+gUUfZzgA{t4}4+}M)w>=OICBdeDqs`^Rp-}S! 
zHn|(xTjI;~A~k+{0c+C`T1m|h z3pGDb3^Q4%`2&@jA24Y7IJodLTsA5D*8bEu?egdHNsfl*HBHzD!mfdvZ=FwSeqN~g z+!M!FlJiN;&kHqQHO_oe^YcQ@S50U>srh-K=BuV)K7Wgq7izw0-1(&D=Y^WDn&^B| z^YcQ@S55i|QuFgd%~y@3=C_fWpHDh0DEDZEnx9u{K6U&`gVg-IQu93(srh-O=JU^_ z(~H#nyi)VO6BAX*1=u9)pyub5nm=of7|nuqQ1hn=bDv!IJgE7*nUtCK2%-t=Jr8QW z=n`kcVy6ATH(c%+9%FHz&4mzp)*KpUy~d7&nq>bZHU#XH02_p=I4ECevgfbxv~%HnIwKa?QX*{$*xMf zEbtWfLMnKgmeZpLW4*@F;^rT4+DOgMKPA7Mks;Y1PrZ4i=5x}-V4L|S%O@S;Q}daG zlNgSpS^Qr#V|?X>njf$nQ${aZNX-`mu5nWH^GeOS*$OqkBU>}GFKZ*C#HZ#5 z%rkN{lbWAbYCf+vJoSl-nK_$QQuFgZHD4nqSS7gcIf51EX+gh>lD?O9p_jp3hAMiF6_62+9 zL7tPEKj2ZOVbw>wQ^D0l>71blN?Q5V{ZOUmQ|~yfq~;G*YCeaQ<4hK6{!pdn7g8lp z?i~kIhU%kVX-2aTz|>Q}E7aD4K>Z&1Q4FS@P;4qwCpEvG)O@;vnt!aXBsE`@i8o*} z6SvV)j&WSC)cmDsgyU~Xw24>INNRq)QuB2=VU73FNNRq)QuE(q9Gl_9ANoe6=9e&R zGnRok@z2zdnqRNf{7%thOiPSP&F>z46g{{RU$4~sK)DltgUw7_Ob>37*DEzYkb(8s zP7hM^>y?_Xxk&Y_$My6eHNRe|`Q6hvL=RH)>y?_{Ese=S&97H#KHJ=DU5rmUPjTOb zGRdX|X`832)#JxhVEF+zeh0erVoYP(Kfzee#gBb$7CzD{XKS+Kr%i&%p)FVC#OY^m ze$y8FhHB4n1yrW#R2u4-%7CJo_JuucPQeW5YYePjB!pD<|%n;Zze3QKFCphOz@_=QzS@xgMKA!fp3^~UW$Sq4gljLs+ zmet~(z~2FAOolr7@CN|PCPI5h=X4U(ndCQ^#7dm<=LLZzP9P0Cy^0o-1_(Vdy!w>PNTxQv~pyggw7w?mP1wB9G z4u7f^hC5Y*A8)6nm(%VCFpddh&I+MH5Xu6-<|pdz1XociCj9 z+288K5SpJ|x_U3pg`Mp&GsanV#$|v@tr2$iME3A^7!-E41b2_!4*>?5u6xDMkf@)NhkrrqiDDdLa$0I96B=CiCUIp~;Wkyc~4^A>0`>4I=W zA9leyf?aWbJemPRW&*sqX%O&MkJI|n12H#olYr-D*?Kth*Ad;Ehs*j#vl%Wuy^_6Z zmhDFjzIzVI2xh2LH*v#q+1H`vL!PF}kR!t>{HsGaU%CEf^aZ+`k8CouHsoi*<$Shc z^^G|0v$a3Ra!lIL3LC+AuBP8v2&}mimmhrDAqZ|davKme@^Bx$)iIlgwSg{;mcREs zcqO-dg3~SU^YAtf=G0?hW}Ck6^~FgXL2ylPX@q)9nukgp%&F(Xl>;qhU9ugnRZyQ+ zg2M)6X*RzFPqyYSm}h#+88|(iK=!AQcJzIu@-t`4_n6Pt%V27@n%i-H+VwbGhD6Tb zrPN?^7-nVoE!$jXa}Jeex%VNmlj~B}t?c(Fne5iSTaIP2^Kp0w$!?jB6O`~YL}v-j zUl@JcF$l77r`n8mc^P&p=mX6V)|psr6{`q@vXz}{eRm^#XTIyBp>2-5IL@+ zZt(Xp>Mpk_Uuz|snNb~n{}NknDcd0rAxQFoq7A}Z;wQ$z*bd0rAx zId`5)A}Z$2Mm)3I58ae<_dnaN^L{Qz%AMWfBjs-3)W0_P3jU?cfvu$6@#8SZ`QO|y z63SiYEuJCVy5T0WY;@sPI38i4+}W+2l?dbMQT3$U{oj6$P0qySvFGQwI_ea1_j{Jm zan4%3W|gT^#+|A{d+xy9zfKW%swz0^lr=j{of7U;^T)G<=d$$ zeb(kxTTPwf?NrsghjhC-rQ4}$-`>8s8`LS>PF48edn7g~>y&M0b)mg&SlHGn+Ro|< z_8y0!QKw`(t8@4M2&=6+1>0F&bZ;a5lxt^o>Ajz0zKXT8y5>Ek+SMu5PIdS0d7jOt zP&-x4*?Ut7RLZnd6_|F!&g>!OsZNo0^XKW+OQKz!675tK+C!jSodWGtRj_9^W`a89 z*{RATiPbp^n7TT})ZHxUxo445SErOZRYj8o*PONLjMGe=Lh7E?toI3!u2Fz=7h||$ z%5(M!kgidHbXFJICqTMJ0n%Ar!9D@fH42c<>fC(-q-zu)oz+G636QQ)fOJ-u-X}o1 zMgh`UUGqKx(lrW@PIU#5`Bj*j8wE(Gs?hw~FrzjKkj^^o6Choq0O@R6kyZlfhANOw zA97m>q#LS0I;;679*tWGq#LS0I-TY`%`13Dq_XsB2IdR zqNu%(OR;Xah)pB=F|>K1h%H-0oO+Rn(+-N*`htjUzZ0?jV-Y*~ClB4CJtIY&xj@8O zXNlN%P{i3!i8$v6BF_Dji1R|Y2;HLdb0RL7DdNI*5uZC-#QuL1@%&OL;P-xmG2#}z z_^K#h`n!aF@ScQzn3d4W{Cj?G(JMEJc=cEjuk9A`<8M+#R>yFmyOA{`q7-WopO(|14t9 zMfI* zt(bSIi23)4IO16m3w};fHLI|S;_|47E2~6YHC)7j`68~~EaIBai@5eHBCg{nbZ*u4 zzZP*r2I(xz+}I%ErXxk%yj{dCH;6d+O%b>LOvG&;inzT$Zgg(d9pglN@n{iu zZWHm9>qOlB4G~{^Q^Y-=h`6tYn`Bk@PZsgOu_7KkL&QT@i1_-~L_GYQh)3QP@n~Oc zUAR?`HHbJgTg1Pw6Y==9BA$3a#FMXz_|{)UJXMZa(ye-WtcYipium?U5#PB&#Ip~G zc?0|9E*W;dfVl+Wm|*9U zxFf~0Y4|6oPZZ!3|H?0HgsS}3Aw2S>jZ=Il5*ET-&A$X;?iVPI5aw$BhlIHj+^B^x zSMyI0=0?L=2y-?69bvBG3vd&{TxEU`+NvfIs^W{H8YRs2N1I|PTD1!^QH>Jj=CFtG zHsu^d6vA8$33KwM109n?A~ht;i6Y*Hj=|HEajO@unnbZ^ort67i#YyN5o<0HamrUj zv_A<^@h*~|#N7MyZ)6uVTEZOU?=eGT5MXQxT}~dDvis>j;HD8c2uf}ShMD8k48=7N zo;QU>?U@`Wf9cbb<7!#jy7o<5O+jJOJqm%^B{*1eT#9!$A;-DvrCN!3P{_fgH~@tl z=Wd>dlGzcd4hUmYk}0KlOB8aP`>e#!flD`&=6zDgaV~ya&hrFwF{uxE&R5~?bU96s z?<%$gvoaYs>3#y$z&_=;PZ#v9=Ph>FijknAuF6iqlw7=;38BSur>)s&iq~kUz|`|r zCoHex_3F5x4e9xg8#Uyb`m@Dxw22mPSH~UH_>Ma@Z`3Z8Sa{hBT&Hn=j zw&$u__ 
zA=;b-HHdK}#9`@nnt4wyzQmDp?QRQvNpry>Un3V^>1b^2O0Hn!Yvkf<9oEOuXlX8K z9+sdl4tJ}%+n_q93GT)oMs$;HQp zw88vHrkZOy`82thpC*4QjgX>lWg1D$3UpY4YLu4ElIG%1K20v3ALtx$(bi3KT_~R> z7cX*_Afk3((1S}x`82usmv-1_Ti;IouX)Ywv@OOg6I5%PCSCsRgIYFQnh%EZm2&YG z+g{>Kn;FVACJ6*Z2ZwRhDFlKdArSCl!%r1(G{a)>l0i}=1Oi^rYEpao^Sterzl?`h zr@WQtCe2l=eEwWKINa4g%|)zy{#<;}(R{RCT-D0w&&6j3^uZS4Dp)>$EmlBj-Ahq-!i;BmFb_#PBRzyvmXDppW7as<{@IkD-hCF*FCQNpqboA43;c zg#+y);LDO>wXhEV9Uk!nd|7J2m!%~#$S{iu__8!^R~HJtEKS&;f-g(UZBW6Nr4=@) z;LB3{fnAgX(5dJPC{AgW4J!DuwAuz0d|BGx1{Hi+T4RF>zAUY^K?Pry*4dzfFH2L- zC+N&jy@D@GGfn{}r%=6uFH7rfjTC%YI>-hUd|5i!`7%1j17DU7b@;cdeeh-JFy}Iu zeeh-JaHj}q`{2vcQO*DfTkvJ+BNf?R$I?xKOBls9l7K9VG3pyULqr&L{LDNg(kZRYmY+15;5H`H*|yw zvKKcq#Fj$j>`j4C)Qma`r7FE|1iM&3m!%J9%TUtNN3_%NZ8_qVKB|50X1|SMl|HWJ zkD?&oLA<2PR>`xg{QD!dEMK0_p}y9FE=!iO)v6c3B%sR@f-V_CBwki2KBo%kvg8%L zLx?eb9@QUb(GNedQ=sbkASl8g-o#WGA1uw8=2O0aE=y_ytzoM9*e{^V5`r#uI_DmMfG$gZ zZ4(SM8g~W+bXjtPc9g_Pai_ouNeJk&WSMQ)j(ViI zQz4+slC?G;86Hesiao}pkD?1fmnF9ci^2*gXhZMp#>J85Hiv*NOYX9M9WH5ZehBEY zWPOkMXi8~rjtJLvx5n807qqB#wwOzhg zLyNN`(HiB)Yv`Elm6%b>muqNA_F<;7QbS9#H!^gRhK|ktglVqU&~aG;@5)cs(6a0> zrnyl=$7e5PnwvDV+@!c0C7{dlGg&XYnwFtG+WEx@`0i_^OVVXW)xV2#A}uQ~%pQj4 z-O7tNusE9&X<7L>4Haa6i2_va*N~fi4wqx)#Ttra8wMlvc@0IgSK|S$@)8Y|XNL?z z=u!<;XDhI)Re70)YO~j1^SDx;VemV~><=d+bQNaRcL$tgQS*8BS^2h< zL_Jd6<`ZOP4rNri}eZz-ZFkh!r$tY^QAz zEhUN|PhMy=Zqf>xGIpDFkpebW#cWrMwvvado(3G)z zKf{Cjz=EcX;l_^fdDCnWlxv?y0%Gkr* zjofY)G-YhO?Eqf22^p}`_9^Z}3z{-^X<)dscqYZ|X+cxQuF>-Q&4)i?McX}?rDOMZ zvvDI^(3G(Yx~0k8Z9!AU-qfqPBh56oyai1e8{cW?!Ru1J)KlCN7c^z;FC9sk6t~C) zO&PngORh53FzeCbNX*Z%>$?=$I&fQE(3G+LJ*JSpf(}Ge#?}U#lj^{DPP(j9ireyn zri}fjqnI)@P5LF;h^CA+gmiY6W}q4u5HnA6A79Xvv2h`NaPDwPad%&sl(BcamQ>P6 zbKhT>l(9y;sPtpo`G&6qgh?425n6(5@l#=xI>G4bk>c9|VN%8()(SAIz^+D`?-7Ja z89U6*0K9@snlBcFNf|4)dP!OiObNT5^f+rG>R;w`t$=#)b%Zb}W96UW!8aAcq>NPs zJ!EWa8hnW%Ov>2Sz@>qSfv+`$Ng2DTlOe?y9m1rHy`;sGGRxjfif=!JNf~=PV55Uc z@s)@$DPunh*fbvBlL(VC_D;Yi@zQ*EB23CyA6o}MLmiW8zEu%)WNcDM_eR7+dzr6V zgc%w8lHH*3jkcGk(Bpz(Xy~um4Vo?AP=!w#G0**~P2gwU?EUCaTHYC3GC}m7D?Tfxufn(O%pH_YT zoAwl42pmf)a4ZE|M*?rs%Ga4m1&&o=)OA=trIo<3qyon}SzG#JJ{I6u(g%)7#%9_y zS_-fzO)7AVee9*0kijBP0xj$4N8nh}2aZX_)ThI7+Ld(VhoMOYj&)B^#@4il=ttmK zQh{Ua53j&&G11_35K-vz+2o?P@&o$!$F2jH0c`Tc5Le&oBn z*~vu<`JrzGa7+{MyHpdXj%88n8*ukY5;!J4c1~y=MH9c=N-A*daMaIBM(WzS*!NK2 zSkObds&%{Xp}?`AhYYON1N0zpEUCaTjwPFmX}!-kDsYUW$eS+OUZjzK3N@*~v0%)$ zcYLD)$AU3iT|90Ga4f07u}EPc&zx^m;23{c;N{ucLL-4=Nd=B|in+o!DsZe*%sn&` zIF?l4*gOmtKiTVPBycRLz_CuouhYm|Oj3bkLF0^{&`97|#siLZ>1Q+JZ=Ori1CDjL znAUR4F9IA(DsZe*>LY1<0|P$e0mlMSXUwIQz_FwU922W)J%xtoI2aT-#)-?X>wX&f zol{bQW4seuqh7YHU-dl{I3}x)!1y(7-=hbBV@U;$bxWd6V03g);8?d5TBC!I0>7n7 zDsU_~Fm-T`^gR?f7WB};eYEePz_Fl*4(@IAI2(tI0>^?Ln#*mzhXTjCdIaW;*5`a* z1&#%M{fWV}zT>$i6*$(_&9uhRXId-;js-pZ3Bt@6K{o=&k{)o(_I_WC-KK@K5;&F- z;8-vXt=gHMLsEfbT|I(TyTkL%C~z$38>rg1J-4I+$GW`=~r&ZnPyD4z2N4H>oulFKk6gbu`LZH%5dcH{oj`iqk zTL0j?DR8WtTcBDAOc+{y1&#%#PPv&Uboe&Wjli*_0>^qx(zG7$yD4z2Yig$TT;D^1 zV_iLT1<`sB-N=keDsZgF{1V#azwvz)IQH4T!FI32$dDW|3LNV>R-o})ya-7Jj(v6n z(|Wq^tH80IeFNQaiyuLOW5EbA66K)veaZJ#;8@STfs%jZM^NC{XGbutLmRz@SKwIB zzJZdT;73s4SkDnUmUXR{`4JR2)^h|YJ`bk#Nk4)D$9j(7w~}dn&yS$Mv7RGzEJml* zWA+!|SVnJaO^W;bacYZ-HbutSVn+jY!Li-#cxPm zgtgvDF9OFh3LKl)u{O|GTN(jcP?_mV;8?~3j%k5e7SsAmS_vFWDsYTN@$bB!(n#P~ zQh{TgjKZfQa4f07F+K$PF=ye?Uw~s71&##^=+{=NWm-3TzDWg+1${Y`tsCD|NRmkf zjs@NPBu(q%o?AwNV?j4gQZ}_;(~ZEfqyoo+Zh=grcv&HC83m37-TX{#lH)wzqyoo! 
z^fj#~cy1X5js@KU1w5Z_5xj{^DsU|57AW9-o?E7WIGr{o=+?D>KlXf+3LNXv*R+1> zyD4xi=oTno9+#dFIja4hK7wSWWgLQs=b;8>5orgfUOHrN0>^@Gfdcj$i2@QhmQ>)_VHjY+mA4)E zL+D1}SW;C($(V$I3Mfkk;bv2pS%gDOJS-` zUX>lfhqMEX&$qpomLPPshTJR>HOXuG!;`?V$;0s^$h4}HHg0z0vyx5 zv;qA1FeAV*{-D@#2Jmy_i~z@&faA>8jSK;faUa8J`W25`@eVX8z%hOF64I7w`joaG z;gA&ISkN}HYBX#FjwLN{j9Y7tnK+z!0>_dTIA-C$k^&s# zZO@5o#HLGWBXBG!z_EZ$?uPc3_#nLq9Lp$htRr+=NWetKaYnJg~<`7@jAow!XICF?EYY=>yYC>~}FKZBdnQ98=@V$J4 z;LB9w&LO_6LGWd&iOwOutU>T)s!7i!zN|s;Wva3GvS#AT8j=nR${pXT1uT=|%c$cg z0K}IyD89^75ntAz__9l_7x85ciZA1DC=jvaL2Q!>zN~88WEd4+HnLTWW*+fn6TSx* zp^7yCUv@K-G85kNb-9&36I5 zEa1YF^+m}9A^Q<+4A$3 z24R+IUDCE$DnEuW?8_pXryN$vrG(_Cde#nP-YpY0w=zlUM$>0 zDCbyrDT1{;P0H!Qzhi;Q+BG!1j`t+xIQgT)lCiYSWR^Aj$)OLKWzA%kHT>DuCK_%g zv#jA=Yp)JBlUdgAp2MF&lUY{9Z@n9oSyqH$#;cCZvIb?Ad8%og3zS*rsmLsAP-dB@ zBD1VPnPr}e%(4b$mU${N%NmqfR-rkztY|{c?_fF7;4{nQdN(aQX<3NdSA)+iQ_B(8 z(?WPygD}fPn;3!w_1)zWkNS3DmZ_bC!rEW8eADR=N!fK8bW?6&JEK@s8 z+I$$B4B$sQ$hGzwjLtkelL;Jc@+=Df9}R~*9jG7c9?sKMI8DlF(&a}|!Yu3GvILn5 zv#kHKOW~GuPoVQnuK$xN%PdRryDeds^_QO^I|=#WObfIo#gDm!S=Rp@{c)Ap_}!OD z@e40umi2#|3j4d%)N4E^v#kH4Ov9=d4uR$cl+I~9P%@PtW;ZIcjC#kJN@iK3GRrum z9A}m=%Nmtg_9`RF!+*yCm7#j!e`2=b_JcccSE#wA8Q)Otk>AeZVh&ZG#MH?w8%Smu zT^&=s$5)bBCd$MoFqzitX(>mkK{Dqz7P?>EZz`j;ew{`#%LXd5Ojjh<_#+z0EE}lI zvU0|;8Mgk_H!8Di1j9CC8HlZMmW)`kfyylF6n(UBRAyQC=*Q55d-(&ESr#aFVgO2F zT6fTc`|JaiSr*8^dfY${GRp=kvrKc5>RAuY>%uG>sLZnNY5a^HWR?w7W?8p1W(l)w zpfbzY=1yE25aW~1W8DH2x0X$tbjx_ET0N$zz*GW0U5fW*w?6$LhQQ)N^!kgRBJ4Db zg3B?_;yel+hJdr=5@?(=7UD203;jUooe+MfPj$S3p~Ns86HWf1jc1%p%#yFuD}iXC z_!&4Zfd!GBU}VNKOMXYk$LQGqbsSfzW2mcRq8)|=mz8Fza}5hMDwvq}Gl0P?Jx9hE zG(#oMF)iR&)Vh+eCbw2JOdEG;>W@2wl*YQvIIm^HjCJQyeG4usr@TMPgGdXZDZji; z5?GG&vF>KN&w=}~SK*8`2pvbZ8bi);t8m|w^`)t0D=m8hhpqVk(rVm1@b9Nqg3)vQ zS~jY)Sh)_h3Dv!8V|-*K3UT5INLGMh#^zbnk@rdR;n`xd>H#>hQ=`_l$I8J-e6?Ts z6A@Z-I{K3C=P|GqfkzPd9s(uKExz9B}(=vM*Y>0)8FI zUyBfvkJJ4zKZ(9+-0p!gDl{h(WmIx0zY%eqlZi4axl9B9jcF~hL{d)>u@_?acKed{ zlsJY|zwic&9%?6gXpo|Z^hu(*odlsliXJLUGk2$*AfQ2t9`aNK0S!|0P`UMzh>9Na zyd-LkEz5fRf?%oe)h#s;J`}*`x z^iZd#|Igz#%kTai=YR8Iud4gwS)Pww+)k25UdPl9L^$|ZHV0uWJ5~R$upF(e#&B-` zIU27<(L*n@gpRYG=%E@#52-5DPV`WXqK8yfu%76l8buGO%55ils7BF4s*1J~JyfIU zAyuW<6FpR;=pj`#w-Y^7qv#=3?b|@~P>rI8RE6Ik5j|9+=pm~MZ6JE6M$to7SFnNT zp&CUGS)IFq=%E@#4_RGw1JOe@iXO7M^ai4bY7{+Wb7i?mphiVi(G;W4oy+jYyD0)a$p?0E&Y7{-Bs)BZ+hiVi(q$*QO z-okpIhiVi(bdjXzt|xk^M$tp6iq?{7u%76l8buG?uUT)r1dHY&LLM58(dyvNvvDoz zJVd}lRu$U#E7Wg@aEGj_VB=TOyF&y!WL56QyQvcDkX1!DK8?x_5$KRrr8geHv)2$| z4p~+6#?k1FA%YxIRY7D9kwZfSIi#x4oZB#o4H4v!b=t@u0S^)4kWDEv^>0`ZHY#yQ zm#9-o9BNeJkk$MX3)`tA4mB!qNM|=s^U5%93iLEC?kSypNE`}klsME`L*h_p4T(dY zHKQv$jk$jw?q+7$8C*Zzeht2|Jq3dS|K7FtE+G&t!wOgkM7Tu>fv6A!qQWN%7*qmL zp(PL%UAGvjAc3gp)wR@E0?{mt1rmt*fH^dl0k`kC1r+Ym3q>49dXwue|E`D?Z;3dG zbOg7!ogcru#p?v{Q@m+eKZZ8%60zla5vSfK;PZ2hZVp&tyuKyr(JIA6re>qNZrZ4s}YBjU9?MEv;o6eX*h7_Vf_u~-hc zC2LoUIOQx6?N^If_camgpAoU)ry@3fAYxM$mVs``<_RKBUo2wl9ueDa7P0+F5j)-% zvCH5xa!Ynsi`bJBapq(Zdl!m0XOoC?&lhptts*XXNW_INirD{G5f_KB(sN5b-(SQ7 zV?{i8tcZus7V-6)MLhfrMS0V2MNH(ETyFWKl@yiJvy~LBLqxQlF3Oq09IBi(Rg|-r ziI}rP#N4Yz%zIeG{MSVs@t%kUE+!_ode$%zmroFJWwVH@mWnvAO~lpLiMZw=5!e1u z#C3lbaeWEaMQ-&CLqvRGx`-PWinwW~h?@_JxaBDk2Y)Z(*1ovIxz)F&MBLsY;*OOf zzIdjHJ8u#3m2Zl;`)v_lD;mIf_Y4woU#p1wPZsgOIU*jsS;Rw+iTL`fA|8HU#3TJN zqq)_Oju7$KLJ@~f6Y=j?iFo`;5l{S7#FL+j_*N}uOSk%|$s(R!A>x^{MSS}<5#M=C z#IrvY@!TgOzMHONyysg)ys%8fi(5s!%y;Z=_3K|1@r!3gy!C5{e$_=zA1qz_RTn$; zA|p;4WG4dA_X{k6Xh@F)qT#r41qejpPs{kjY(5J{FTyP&9Cn_@T!L_n%Mzgwg>iR^ z&rc(ipe->Jr>5MQhs(h;#5cjLqM(Ut{|PkFaySc`sP?}@6IDJ9H$f9s37V)j5vt~XaIK<=TG-~`5D`sOt7xLr 
z(cMtR?nXpG6V(z;G;k6^roW(xYKbNih2JS||An*R;r3s2q==(Oi8y|Vh&88+IOQ@C z?O%eZ?2qo8#M}o6nrNs+6G0|$$H3^opzA;r4cbrt8ls761WmMm2BJE(L=%PPtz}Vr zMibSYgm@NBRL3EdR5Ve_-HkxqK3bpSFzk~SXyGkbuJv0lV=Zw8jB+q(j!Hoj_1klYLUMXmze$Pq_9nEwz8QwqzP1MiG1>hu@3$Wz4Qwf@=-*h=mWS|h6 z{xOAtZps}5RlP+MWtBJtG*M(#ZVKk=$Z95p7RwV&6j`I80+Z!EP|!q?_3F5RD+Awg zqlR3Qy-OT{CW>rV#~sx8jypB>9QKUWGQ3D-VnHy*%nkaHcSVuKAI_&MJ zCXUFWi85T637ROf(WceKJ;pVgpotI@MwlN(X_gKn<*AF5ie*pslDR)F;G+pny5n2L>Vq; z1x*wgY$-lWO8qlj-3pp0a?sIyv|e293YsW#W?!i&_~fkF)q>tO%x%TNUnzr7vO>>id2OI?IUQS(qV0| zj)>z4|4T<2^W!p$CMqkDL55jO&_rc%yX;UjQCY$U6-`uDZi9*@Dyy(TMH7`(+MuF| z%BpNo(L`m{HmGQ#vi>%xXri(j8&ot=S*;B!ny9SK1{F*#=t35UtkWu87G1!iq+(v!DSSy)z&jPqKRU4gT>Lstins= z@ZTXQXrewQM>J6%!xAxFqKUjvLGI#ahS*Ygd7Zl{5Q>`7N1;?@_Z`kI7Bo@W!#Sdf z${x{9H#wq-${y7|cXLD&l|8QIk0Q-?QHGSuRvE;u^6!t-vV4R1fa+^4ny7RsTP?N& zCP5RG5>3PqBJr|H^8r@SM5V9j9YTy5KF|u9sPrdx3RFFxa0N|N`bJQn<|D75iArA& zCc{TyK@*ieD*bKq&cwL1=SK>qhXTnYs7Aq9kqn=g1x-|1Zf|%pe=>EgK$?%zf+i~c zyVOr-6P?odd@X3A(p4e7x%syiy(wvR`1~zsqEh2@NkcQq@X=h*M5X(KCFT9q&Q5%H zDrln8CxaeTqb|RKsFMouv0l(brI&>m-D?Xz=L?#s^s!(YjS)0aVz4x4hL8V(CMvBB zv<8S=)K^bHLlRA-b?4@QpovO=rXI5$)TO;j-^*N(}kVmu4X zDTHXEiU}HWbH!5-nyR5_j%cEaHVswhh$gC-p`mn+XrhXl8fwT9O;j;ULnCrT6IIOC z(D)qDL=|&1)SM%lsA8^$X61+`s+h-l18AZg(L@#VHMB5CG*QI@4IPyuny6x-h8E?B zCaPGZp`&v|6ICqM(Bj-kv_{488agING*QKJ4K2w%%v4rtXlagUqKcC=bZm}jqKefT zIxa^vQN_s`T9zZ4sA8jrj?WQIRIy1z%T1cwRDvd|IFt3#g@X>?F@h$l*td}`DVH6U z?T?KpqKT?5%pHd3-KvW?u-Fwu6IFdqLj}2Cq5xI_-LZ^ z@WXI3>x?EEBWR+kM?OZcI%)2^37V+t8~P06B0_=(lRk|$qKT@$MTL`~0>hpgcDFC=K9s!y~*seti*h&o2ke4idf6ID5~h;b6sz~vRXNFv#2&%asC}^Up@9K=rjCHb>%ScKfXrlNE-kvR*D8ns7K@-LE4i^F5 zs&NbTL=(lI4XK_Q6p(Ah47U~qO%#7CBu_#Kd7NQ4CB!eoT}MF^#b34Yxpuck?nMfk zDE>z~H_20g#^esApo!uqTO;-McENp1K@-I{1&xf3Ig8twf+mVz+k=(6oPs8bKNqah zP(c&LzpuK?>GUF+D1MAAy6j?IrbgP(1Z?XJw?_p{6d!Ho>MOCl;(cDpo!wY=_sZ&p~?K6 zHlm5*4I!PKr5UKk1;orV+~5~9QG8rTADlZ}(%kkJG*SHBt|gT;GJGK*Xrg$dT~zw9 z?R>*`1%f7uj|iVPS*;k2j5c&nkZiW86JFvA!wp_RnSAmwx+?i8-gZ^Zw*`; zm`(YrcNd;=nAqWDW%EGe_>&7}ESM9@U>w*xjhm^9y&2%0GVqkv80@db*Y ziQ?}BY!WZSmnwoLiubW~@H5mgnc?deK@-I%g>-L3JhYej4o1*K@h{m88sBJpc`ZGN zCW?Q}ZqRh`7$;lQ<7D64qKU@eQGiG_%@ZC@;l%^@Z|X-i-NuB7CaRz2#A$F$(;;6s z-RcsL!(y6Vp@C?k`dQXsUkOb7kQSne>gQIEF-F!9@n@M_f<-kGt9ULBf+pe}-mwlF z#bGiXh$gCEbT|~6f)pshVJzFcX3`3J5lvLT{6pNLyyD61M>jKZH$BLWs6TP?NMkr+ zY({3%O|%hBRKNBs`7W`g6q5x_RKNajQ#xCD^`3)LjdPzu>^h#N^YdUw}_~yW$h@;wZru}{rHe$*gdZKil&_peFvs6`> z`NE1OVngCb-TXESe?0a@->PULYi*%bpZ{9^Ko_Ek!ipxE1zSe~MMzoDL}5h}HDlCu zSQ}_1nkcMjqE6Q7EwpaHk}~Y0i6mpA$e{u(O2djKVjp{{>R{pUGSjbO3PclyeKe6& zOno?9^pT!QQBN*L=%M- zO~kQeb1`kN`$k0*aTIye#f-nvNHkGc(L}+RGs-z#i&4=;!I*7_)3^%jhOnZE#uPGH zsk~`B!Z#|Kh`%`S@@!jABhf@*MH6+3d692aG*PFRU!?I(+^oWiCgMf!C;KfLi6#mw zny8cU=QQ#b6IL`)&^R;18_O3C!?oa{iMsT&nVI%n!XBEa!^N~sru7PJ4TKd<)G764 zG`@iWpYzZ}fv7XL(n>T@*h3T9CcJ`%=r|Y@O(bvq?JVE+0FC^dA*^U3-U)3($VFn> zUhzE?O(d(2!1y&YKBUKqTTNKeMBS2@Q8maqD4M8S3T+eUz)vQ^iY5vUOdZ_Id=Etv z1wC|dpXqxjnkeX@gZmbGoQ*?H(L_NH&EOhc>oCC?$O zXritj!K!`F^UW!mDCirg+MhkQu%d~&x|z00JeWzT!ipvedIYLAg>K(H5mochL|Qeu zopt!Nt?~U7P1G$pt?D(to1%$&bPLw^TV8~mqKUdi2vquao^M#uL_PYNwhBxFS|vpj zb#n_;Ym)D)XrjQ>DK~Q+0Bx)2Ml?}a(L_BaY1+>B-4spKH8s=rRo_F=L|r{}1=03n zx)DtjRy0wM`6aZ;OEL1)SJ6bD?Hg?Osa~v{qKSHr6=?kRUWBlsi9S1mX}i()RWwo0 zzJYFd){mfQqF@9WiE_~T{@wRgG*QpKfs*GMy#`V=(Pu|6ZO8b&iYDsWH&F5m{RoOC z>N!HkvaanBKZ2r(dX6B)=fSl7&X1sIqMjr8tz_B;U^3SMp=hF>BXAv{i_w;OjPO&8 zww$7g7y--cjyqh(z|rfWj>gE5FE$_bi?4T2x9_zkIxu(p@z z#Z7~pqKN|cV12bE5qK~&|3qJ+iEZoCiK)Jle6?z!a@O%!zNT0m!nm!zVJdh|7I4ZfSAiGpr{0xqE2 z1YQD)CJMR*3b@;I%PE>D=+?D>w|Tx{MHBVtYucXk-4sm}bPE*lUAhrX6jn4*&@E8F 
[GIT binary patch: base85-encoded literal data omitted — no human-readable content]
zf8o3T!R!L674PV!y8V>BI_%-MM{TtMaSqAbq-TG~ds29^vG*k$#shPirZiu=%i19m z5R*f4C>Lt2Exz?mnY3V*B}atkCqGq)lts9Aza)G&9*CQF$i6`!yle<%3wB7200^%b z!igJp$X_@WFK-y3MYryh$s1703E>oBZ)2CWQwEc8Qt5RTgc9lcAeOh_zRF+N7Bt$V+p#CgYJ0PsA&Dv^1A}Slrt+cYwLL4i5zY+7WEzYj2_N>ng`8%5(Sy8q^(td4|1G{w- zJ#WGqD#!Dxm!h&<5jbZ~%R17SYu>kJH_*Xm-Ua<|e zJctBrkL%utbp2j7VNyTzXVBS|79ks_Wk>C7Rnf1o3Qu;a+zoC$5YF4&#z??&2dNbx zwf1PqolF>#Tq@^`K7RD*D1ELp=vmRZP-1n=R;W+Doh?G23~;GWkJ9>c*^fMyx{e6~;fQ0+`^Lo5YnsEVyY zY>$do z*^Es`#IbFmu212*T&}{^8iFN+w%Ay{Niimp#1*LrAu|Uh6o4=7a7qF3l|tJVgOV~m zD3cIpPdkUw?lyIK-Gt2X2k0g8J>=9Y3L~}~MC_;Ns*S9z#LzXXgGQU}-W zs1E)%$u$SG6;B&EL-%VdE(1VY@fj7P73()8jaKZTVzlBu6{8h@S20?#XER#yd9)9$ zc$76ZR=g1$ZAJgk|C1Gqz^%xN=OB|-JPm%N6@B^Tt>`gz>2AWX72BZJw4#mh8AP-d zZMrQX%8L6?9sC7$LmWR-tRJvc4^S(!_#`n|+vM^K@SuR%bT0)-KjUpdj1PgkR7jdW zy?GqzEs^r9z&9X1c9mU-eQP;acXDbNLiR!TjPe0U$atkQeUf$AjZFzrG zEVmrnAy+Ry7E`9ZJa&SKN|(n1&&?|E8^PpGg+|NNZ?c9GM z7UA6b+}EqdG;i8%5b5OQ*pW_Nj_q(pwoQqQMTNNy@V=4%u?gcgq4H`Qpj4g!hMmlh z4=+V^hA#~K8(Y1|^2@Ou!u9xK5+Xaq3gLQu`_^!$67fB)*W=s0 zrG2H736;WXJ-$@3fZ=-l3CN1uf{Lg$c>E+8e244seNCRb?7TZFXlcEl5eGxdoY>dLXpwL)vkWGl=> zA#8jJVR)~ZFz#Nrx$Dk2g3qc{$At09 zT!sJAgfF7jw~5-`OY#!*uOg^p8A;wbAY9Uh3#=WGEw+}IYtF9u(1_E@?NPE~Q?N*= z$Fn}jSg$Ay*<+y5fw_JJVW<3@2Eqd_Zb5czYgenvZkMXoTntj@pf}%5JBr2t5c{>f2bV}6h~Vl( z^dIuBP|K!QLY!VYjfPaRM4?R>4X&~mWzvMDaubTA2`zkpC~UUnYC7K9rf~`$ZK}e> z$FQ(;10GiQ>;C#dnGSbAM?J=?$EJ^vyiFeHi}~6Agn6!M$JtTeqxgMyXi&fpAN`G6 zo>r#{$W*7;F1j%im=p2o;HXUtKM~vySsUeYOc-|=Egmvq>{sTEZIreBs4mOm$HM+G zWtGTckXhDOCd)ZHYv`%Om;u>B9;2T`Tb};5j#9ohrPz>Z1`;;5 ze)h5Hwuq<_uT>@L=CmQU#Y6Thkx@{Hb+RE_Dk;sT+ajV$dq9G9`q>JMm@g59_j z<4ctI;g$dNQf0I@VQg27C2xWel;?3}lWv6|E>7Bzt_G@vI+lY=q>{6l}>O^^m3ZPQ#j$NKLu8L0{p~Ot~ zRn9q8j$Z#Mv|hkh67if|W@NqnsP@8vogzGaDUyq2{s2Q|&Dhs(77 z*k0!a?}#8-6lV`FYPWnLzvEK7j|Utpa1E|Y!eiEYF9~|gTCc_|GfdqC^2C<5oLZ>9 zdZBWFw+M`E{uX**6Jq@3Z~cIyUyCd(eFLxKaPTsfnsG=sX-b!fCVdIX+w>8?zG`v) zw;U4Gdux4#LRxesLA@H*SG^iGAA|?g>D&Iob79Hzf)pyZJ|cXA+)>z(z88{ny1U>{r_dhO80QHzkeO!)^3d5@gDLkw zB4-a~Py{KeGp<*;wyX1##mq4Z?tVK&(2FiSrd!T^K!;KQarsKYOSOUrOkqcvb%*F; zd{>t%<`-4WZn^0d+=js3Aw%aN#~NeA6StPg6)(e~mRr_HQ`iK%&^6Gg8JLTggqc?v zCZ=}9YK~*)3Nh{wGs)3&LYr<&tz=GEA~&NX-sA2wB^;AZ_nKDzD3#v<<@@<~f2Fo#F*kD@u^W~NAkxS)EvN#qPjHU}hObo_kt1V)x$M zjM@p#4Tvq1E^lf>oTecL*yCY6P-%@lCikNz3|m`U!&gwwi;h4nckY%M!MT9gf=gwx z5j@Mz*chdl`{s6=7M_^b>!XF0A)DI3GQz^!fIfU$F}w}9Osb&XY}j1T?P5#hJ5y{I zV}(mi7+s%P$Q5Pv`9_8DM)39s!F@obp#~d)qbd=6qP*Z|l;DlRdW|>PmY9&5vyL0p zR%bLfTwm-&u6$cJ^a+!-!4ciizd^ii=oXijWm>R;`!&Xr`sXv4Hd zu4uzREE_st1Q$32mrA}7Tx@4-j8aTPAGK-WhJM=!+lXv5h&4fL8E)v-poA;FOfJ~P z8l+y2d&d;J*3@H5CG{yruBiI7%+vK52!qmtjz$P>4k`_GpAopP62a%o3%*ea)(!oX z$@ZZMBO5xJ8?G-JdhSWx(0={#!A1bL&8FEF{=PRQ^dl-}reL6yLEl^pX-;BV&D-rCwuAJaj{b(rN z&{a+AG&5mjLq~JN^|bG;-AA&(zC_AYjyk)CCwe|}W^!X#+0 zLKcCuO~rzU{fQV}?LiK8I0I@+FMT*;N`L!UK{2S6AI>-pZbiom`a&k#u=Qlf?BhI~ zQ2K zZJl=k(AKFjfH7KUDq_nd2RUe+V%Ef1=L2x`p^MY3O{GUyT3=tWbtXb4ty2Vkq;<+b ziMCE%qvH@0hOKiygE( z(Z*&uR!ob{iYdOB3`jP=T^wkISkI6grf`Ug5(?k^5T78P4d*n9KL4CHe;zb&EUVs% zq&e~4K%Fd48py3zb)dbWI?#SK2Ms{^a>;2H$$_5xk&%Zg z|1$bn&uY6$8$iv`-*NfjscbYdTZ0gG zc|H9suxa7@%T6OF9?h$C({f7WMsDZ7TGNznCDIE7*0+I? 
zl~IYz*2oZ5-!V$2erV`!vfXHB4=-x(G$q)O{x&3JQ9U%;^pFVh!&X_VO4EzMf!`sM zzfh@{6VbH+j74l2u{8gq;~*Vbq3sIkscN ze7V~HE}%?%&s(1hvSs-zmRXMNkVyl41Z}Te4Ff|^ctdp-r{$oV?16s?^ zjL0k_s!b&#^MQ{Vo#q*lSA3?eTSSq6*z>i{pw>h6XBmOFR3f;nyx_w|@Ir{7E1jm` zKPz&joj}l)9yh{YvI{BaO1~Q={j(^mMBX#%y<>8GX6KA@hkW3p>Ror3HsyfRrrt(m zGo)GLZ;Ys4D-l^%UgQl*s=ugxrnL#>pP+P8BT`#j;Tg{Zs2)p9+4m#O zvNye?Gv05)XlL`!E?1qmRM{oc>5LZnvB{eJvKHmoJ&gy1rPz?~%ax*UG&VgXBJxI4 zZ@4z#N>gG1lwyyWX2jds{I&*BZBksm68T2tCr0E^Bl3t5*}{nGSc%B<+#s8c$SY0OT_&XXSw_6g8(&Vmwy{mO^F|dl zY=q0=I&<~Uwe~hF@s(bni|`f%2iXhX^>Dbc2fL`1ggK2_odSzt=6XG*%pgzR;L z!K3YLUQ=6}Pz&qI*>pQ^RONm*MSW|EvSHipqC%t45mTUzm|jVNHr=$P(4;nLeeTs`4VSz zkoUC7Q~x#XJ)6OGU-CjuH=j?8g`kU1yEs%t$c|9Ei9bB!+%YRiMNO^~|N zPCZ}Cviinw%UI<90{W25>yEQKHJDAZFGcpITU+B3>b*ALdK-!s97lK^Q`0K$K?VS6&b`IL;uAp6Tzg`7Z zvvDeKlbAut4xGxH74umG2TkS8j@bu!k$DSZwnC-Ia>vEoiQ0r{D(}RY1F50B`y{P7 zdT=#4)(x{}{1+)~TDKmm+Vfy-n=)!mO%N`nN&9z7KUf*n^n{8!`W8 z+L?@$-?BmkE#;0bmi13v<{4FpMr<{;QJQT*8Py!jR?|L4Yp7pH5!WbwvA@;y#6C79 zX4K*oP-f(MZHjkPYHv_x<~On_zEQtb110cAj!p5`Sq*!&Y9_}~hm+7<+|u!uuklev z)Th%oXj=4v=}6bgLIAxUzBmEl`U%?JfYQpWp4r|%DTktD?IeP&w$Ovu|5P1 zzkwn1|*1NeispDlz~Kr}KLE%B84NZVmYg zHPkfUUbzM9DY=PK7s_jz|6vJCm}lkIlBMuiO@qfjc0F))WoS>ERdfdJY>slEDxX$8Dh+SxeCsf+e9{B5+Zu{%R6Y?+-CA~s2t0; zUz!uyLY9MPvitYT3YOkRF2a(A2sOV$yX3Z)yU_ZYShiPQr5QU(B3fG0F553#k;u)F zzv23t79-h>uzs^wiqRUmgQR0;+zG!`M&B#XLGRq5G9^TV(HEkK+>z1}KBjRodu0F; zxmQR}sHt^5~YO>>pWYBW#(cH7ZK z&Y{v6FQrT-!*>1KKM8*@G~hmmnfiC=fYvaym@?T5ME{PLC533-GTB2~r`ItW7@V(6 zo`cc)cYXk)vZ49Qq!ez}zss5`AzE&kv_}i}&v_q>t8p>$qf#*D_3!#meVgPOwZjie zw=A@z0hP&Gu={s^05vi4;|E`aEeF(%ZP*b*oEobJKZIaFYV3OGVkpPZ5Cb+=e+->v zc3;+g@F%d=fL+xSQ6q!-2+RReV{%J-_#A8~@0O!@bE7dYK}kL6>RbzJz6Bj4Dc_-6 zMpA}^hH5)kdst3mtfnInwO)P%Zw$zO+=lJH8?*6P_|sysCc6PZYg$)<5_dSp9d{&# zNh{_pm?ZA07|$>aJMkL^gB05#268-}zXyPFYAh)=U7*Cp<;Gztd)`FLYk8}5WWrme zQ$Ge(M;EH#s?936=C}&ReyW1;zcZ*lr3pHnxBAq470eugHuhEz%u&ItwJMnXpbF+3 zQ^C9+RWQG5RU&U}sDgzZRdCA)6)c*if?MxVLD4Z4EdD|TOD?G3wwiEJZ}p|=D!9F` z3YPIFG`!V=D^;*!hYD7{se)G*Dg|GUh2wjxzj09!Pt;SXw-RA3Z}qqHRB&><3f?`U zf>SrC;Jqy>c>gSeniJbHU(HFwd<-UEt%B?4t6<7%6->QP1=9|zVERcF%=keCGi$>2 zyfp)DR4{k23g*pI!Tfb9xbdJ07MxbWP5)KF&7AXiYu=Kof<+xvP&hyZORiJFZHrW} zbfXHE?NdSU1r@CPQ3b31RKXnyaC~pgM_a1kv0*AWuuuh$uT#MjhZ)q)`Ah{}yTLQP zwY!aGP^V7=jF;Xzm!_*AZ<-?ZYo=2DJE>s62o+p5O9ca0t6Qs}f_o0BVEb7W-21NzcBJ6m>8-ajM+Lh^tKhyHRPexh6+HN)3ig~!$w-Bj?%wJLb@CKWukP6Y=ZQNiPHsNjk3RB$i>Bbm3}lUXWwYJdt3O;y3u zt5k6KpbCzhRKd}UDmb2uVbWXg*$ygr?g|w=zfc7)Y*N9CkEr0KlPY-mcNM%+2lo!(A#keN+WseT<+=y{ht$3ad$dOmV8zi@qNE6e1Py7q$8inz9!qKhx+c7f~{Q4qpe5;U0YA9Ft{9E${4F3GdRNr!p z&>puujj;sj1Rsz>kOw1lVxAwVB=V9n5VewJ5Lh-N@j5UBF{B_{E;_pSn#l$!20z<_2>W=-9_;(@EA72CsN3Hk;-LRD=@iDk1 z7P;bEHAN5;-y656B5(W^xPcV;;!i;HB7b~SrjTP9p~&(z3M-TNmr&L#R{SQ|V5uwq zd)Te0D8{NLvG^UwyQ$8!_;-~$(}8fsA8kT)>ft`?jqlKj>imrxmp}f#jwWMEWaL-h zV;Yvk_r;9DieC+bN&K<)%sxMz*;ipmQl*RnxCzvYX?QTc3iPq!+bM7j^f6cb0yF?& zwVwv;A=p7u)bGGHOmfw!3*$7b=X$FZO7nx`mO3|~0UOqLWujH5vFY5BuoD@Dc^cNn z56sqst;DJ;86>3Ggixb+>ksP%gtz{P{wf%mt%9qEs$kMI6JdM0_kw{%Y?#tjxte>|%xd*1M zHck0j(hLyP#q!fDWkxP|fsxS6xTyj#Z8h>zlArfVndt}YC6z;aGhtR5Z=lixb;D0{ zX(mgt{3*Co$t2jIS|1fnstZCAdHN@~F7YnD@YzW7}E28q+jzI*b+u&d@2Z)p&;jwg>P=4O~D;R2)ubZ!C=KR~J+AB)n}A;F@_4Rjhi9$kEZlP#m&9U<_tb)*#I-6~l!a)iiY_Hq z%~L!}K&{*TGr&B{K&?CYJ4Bn*hn;BAtRc9#@vS#h-5ckb41d3*m}N9=l8c)E<>~@p zBX3V!n{hHimNp=*Xr)a9O_Evwy1+%AB$0v~vk_#ctt9(V_uL^Wtq3!tp#oDj*xDf6uw zem5soqMMw>SP7ga$saIpjWI4sq9`kkb0&GP8EB<0?LWFuPNL+&G|(?`(={ZCDyfT= z#;KLu3FoRY#T~AJcC56!$h-qtYTV$~UQLR2d*js`K!|OI`EZOUjT19@1jA^J8AetV zJb`mI>C*s+>Ta#JuCc1Ss&)&x9bLG_1#QkWPV1!kAW*J!X@l{q)l1-HPnHt?t4NYx 
z86lUn0zYW|9SxR5G8&a;N#m?gZXjf^qj3~Onb=C>gi#XE3N=Q^SY*`A=R|NWDTliP zan`tvw)JzQf5ASt&{&K`CXuX_ZsdIh@l={E?FWLVVr;K5%h*eS$+KE=@_8EmUVVEj zPDf?j__{%nl?XZvM zp?qX%*&W>0Rbhntk$gWPUVM1gwGyf;C&MV_%XTA~Xr>)r-?!*k2}vgD^R36&l8|hY zb$ks7u4R&bUt=_ILT!`G^1X&;Nk}ot*1jyXNJ1Tx?Ce`eZe5ei^$o*tnNZ&(`}_W7 zvVlqF`yOJssU|tv=cUe#q>+y!Z-MUzmYXISDtWz+hZz&nO^s&xej&GsNzU^v!VNH? zsk{trT&x^2^-CM>^n>C% z+8gPCN_%(uQStHimM~a)sq&u_%d@-*+O-GUwdv)3FteE;?EYOI2DtzDwio* z%9ZZT*y>36toFlc_y4qE6KB8|>LfgoQ4`37gSyeJjD{#G;Yr=*-i#NZR>EPezYmIh z105Q9X_Y2ys?hzBRO;7+52zt(lI7`&;(eoOwS-w<$-GRY-$Sor3Yplg{Ct3wzfhF# zT^41SX?&oSzZ--6ff)i-rZnF6!Kc-}>;~>0)-<7HV(fN>=nKr;R$u88}?6rplaYeEi44JwOc{)52t_+a3gvpwhz;y+=n{8fdJ5K@^ z8Ds+6yA|%vG@Y94T4`K%5T1DOwRhA;&xDm4YHHCz3abFJN7>xZ#R(aP`>pRuwy~aG zTWMUZ!0-NnJjNJS3D7OaB@EdP-}X&5!d%Dhge#$?bNQfBDX)fsCjbEZyRWBIvEC2cUir7U8-%xs$0w>rq_ z)}R+nBBYU*4b?aSgT}lWR_D6GjAoc>)Ll-;VpEh}g47C~ipe+&1?sNUDQ^b9DqVM# zPQ_XJj`)YK>0S$*6_G>fWhS4KmhYHeFXe!?@D@8E<1g zUAKgzfh#>LgP*IeTS~26>8&l#RTwbRT3rIijOAkzo^APv}f9&5hOy?Bu0`*0pxW;ln}1>$?edx?l(#kjqr2*PsYLt>;B**b3IU! zsjT|FV)Kwa5;FC`t~BsWg~gJ`^7b6dxj{@^8kY>^QIa!-lK^|xcs=P^SYk^U!{l0|3?=hNGd8KG0G*R7 zl(G+fHTgQjB;DLDxNa%`!yF=crjyC+7_+#XDP!Ry$?GfNa+On>Qt6kSRcav}&}PYR zYFgSfqH5u$mVCLIbeYMzvLtyh0d1YeA+>3~ae zXVU=ewMoihmF@jpqL%mZaGt!z;qEk%<>$J!l(1da(f>t1(P|zSvt>U<>EuVs@wmb* z-($#3URF*fm%ZiNH1N)_shl#?xE3yJ=~11XD-WS2O;TOm&$V*-4RVry35&4&TsxP= z7%G$RiYiqFq zjQ;gySWRU@EA40U26qN8%cVzWW=SF$9f*aeae-e(64J?~ADqJwKbQSw44LPnHB~aw zxFI0p321I6m7&~byy0GfETY@Bc8v|U1hZ>|pW6shleK+9SAa!1RyESN!5}*bXl6zL z_8=>bI}S1}2lQ%&t_oKdO2Voq5w~^)qNYSwKqI(MA$xI2POb#OEex4TZ%J_?ly7Sp z+})7B*oo&kx&}s5?srH5`|j2VhMzkiGQSZRC$w5hXNttFkGy1h{5K9B0hXV8CDN5W z&7+{wbeckoocG;XTM#7^MFnmU9E)IOQU?G_nD8+UW*wGoBTt;{_bxrWU9 z%>qq`X-W97U~#>)ivHw?2S(Df&B9>vhmcwn6lmZjzpiAO0eiK<#% zYcj3?Lw2>YEKYjF046Cw2Lh_XZjDE@0YOVp3MGw&0JY3(b-RKnFHzV04i~Z2Q==bY zR@b>iG{7j*cwG0k_!0$ZrXZ`^-Q?}WrSaqk(L*9~D=oUqIE*8Wr(Nt3$z|`o30igX z9zyO^MjNSUk4Ap1;nA+?ZEBK?S*7HhV>y8%cm*2$ZVY^jWjp|qArjiTtG9E9^`sCEp9@}CfD4jkJ%%2SUuI)k-WhZ1FxOA+$4THe7B5;Xg4-vD z-0ARKPnIP2Q2A8BR`2oT#9&hDX(dz8UTkAqs&*FjB+zFSam5&~Je?#7J`;%p=2MW>XFMSfqscrs$S;n9Xv8F5tUkAg zAPeQjo?$6H2;s%*vpWP?BpIOCx+tA_vHH9kf-IK1F}b%Psz$ofzY@~027Xxcxcm(# zfK9ZID1Ad0t51z|0Q;fNnUBf;F#6g+>r#0R4&YfOIgN1QQ~6LrM}jwF9QL@pMxd25 z)Fx&gu?xxYL(QD)LyX65#vsVCa#n>H&p8>|QK%aRyb$EgiXpnI=9~)QKD2S6b~TV+ zLy$8O5UpfNV^gKfZ)~oz4x3a9NN7vQCf52BY=$c{nlT}7Bv~V2J;cWB9WZp_(dhLkRI-3HFXeD96e>K*Shy0Z)u;3Ve^gWJZ1Mc;E|@@SeT#g&Fm@66dZp2M|bUgD~QT+W<%P05^qp6AJS-HsKd79NV#-94{w zh}qs%AA<-_9N1;%O(1gJOPQyE)v9BJn@?H-ij<{~9f^S7CM7WDveSaci z+&s(kj*GuvRZc|TnoSJgD_46cP)uTp75kmaq>Vsj{iO|k{ zLkJP)z9Bafp`Cj@5sPuj93VN#kqBMNvmwM)axEIiSqc%3aU<`$5Voa^#!cCY4Gj!d zUOaEOEY#D^y%^&|G|b9tXCpFGrCn6j9F8-LuoHD4=yG>FTFP#HRkyBl$eo3oM&`w# zXai>{x@y~OfM=e&9XeYy!dbQFZER+oT#VAlQMIpan5Wo1yutye25!(wE6;ZK7Tgz| zg*vL%k+4_Bp=yWS4LFXNI|#=Rt(R`VYeKk>-PNLMsl@53t_i_@ad$<$T&dAb`IMa@ zbGl5%Qck%Hj!K`gv7Y)d@1x&Th_&*nqlfD%wTO63`> zydfcMuBR3qrER>hL7Q9nJZn5s_7E!8DiLH9HT++`OG1`kYl~8 zJ8sCVLMe0dd-b+Lk!-+GGx^}=T9d~#=oew#V)$c1} zuYv_Lf0aiZcXR4u<+)pe`&(7?pdUR=Fj!KR(B#EWW<89qo{OH>F_=X#yOTK|GYHQ= zo*7Vu4}qbaQ^*X$r8D1f9m4F}sbHwKs#;cFv5ocAi`@omJF%f*(8_xv1j~vosvijp ziL~-QwPBfOTpvPpr!;mVQ>X_I`LYZOdiuupjFRRkw2ckR{KeH1_YbGEP@zWTXdCMp zAKR)%C0Hx3(1vAt-Iw%?ggF$vpRjn$7(GR?4^)YSITU=`hGo`u`!VnOKPmW!jrDAa z9g1mSg;*=kk2$O^d4FubD3J~Y`w`X!PUJZp+ZEF*r!aLHt{^N21Bs_Y+!Wm8oYEW$9TaF#W_*{7)VdrXUFklnM?U%kW!3R)syP}WxR6($FHUBv z7bo>vT7ACqn5kZz_g^WihVa($?hIEZ-kLx_#L_AN`#WI9x@`Ik((gsF zo>wOEmZa(0hFw>X`5ry(_G+ zsTQ;b@~u_BdTUivWFX&K^{cm5HN`cMZ>{>(TdSH9Gmty!e)ZO>rg#VPtyRBzYgJQx 
z1NqjfU%j=eDgHs@rcby0v;AmKP3c+nzc7~cYJjHkHxw$$HQG8usZ*M`KqgIaX#c}H zAT^ebb;BgS1MAoCz)m-)v9&pm9?D{kT9l`SdJP;zw0rK7_;FCH4B+Cq}#7%=! ziOxJkhO_PJ$uIC#rQ%Qzi&d5Gj`q;@O=7cJ`7#?e^XJM=2y{!n{L8$3Bo@PrTYBd* z<5zUpzLJXBDh-6?S7oPR@;$nce}L?3q~^S;)szbQqCglc<4{2AYzS)Py%E<`HWf~U zKxilLauOJAq@u%{!SCrUUjL)g>**d4|E$^%RW8Z*Nc=Cz>zHlkz{03WpR?3YHOR}- z2)0J$C_<82<7$ogi){Fp|5aAo!5Qfzik!Ew#;F+Sx$PPp>3A`!~c)DH-VF)y88a_t(swqVU}i!S(%>hp6-SohGlfv8D)TB z-vmUFMV0|^6af_x5ftOTqoO9RL`~w7#69lDeT@c_s4<#Ej4>Kx)MyfoiT=;;oZHpa zJv}``-uHd}51$Wp>z;G&xoh3Jb*t+;^pc|=CH_dGWGL-~pOSI-uNpS=(@cB97c?=2 zKQ^@q*b}e;75>ENtLzC6R8jm>jo&ZR4|VJ+d>$T_&%@s*A5;PUJUlF)hbKk&JUlF) zhbKk&JUlF)hbKk&JUlF)hbKk&JUlF)hbKk&JUlF)hkqtHPC5Yd{)e>W2!EyHbVX1r z@@$|c9SY4ny7z>?(M8oh&D;~A!K4x{w37C2Z@OsY#pB#7J$;{)9)rT@@AC9TDLv*@ zH5Z@XYka2`F7Z_6+MN4=mn`7vI9oNzL2Y=HxYk>gI;~V{;sB@@`!J>3Q) zO-=di=IL_ev%mFDz^8{wnvcdNA|dAwbPRUT=p2a7goA}@ zAc}(j3&|jl${&P$lv9e+33M9b(~yHNobtb<8)fZ8+}!e)QR~z`HFQZt8kG518pJ(Zhp?e-Bc9)zBp*@f^w3?!t&h_zHB>&khFt4be0DulKD)+{(t5gnd=!apDIP$( z2meGu-pNWGs4@`Ge~o4v>>W4*w^|b>wo9saq=gSd``EUfwx&drpq=hc^Q1M@-FLmQg%*092VG! zA9$bjJ(}Eo<#xZ|njHcQY?2Rr$Z8}DUk1iv3oPnT_n%r*X}Kx6aP^GERj@b@4e+G( zXPQAe`@qbv6Y%k@HJ|#elaJ|ow?*0)Eth6hw7D*m2Bh%`<5A8#Bn+ZyL(WGy6{#G* zhJdC-mH*8q`jD##_vZb^j3-FV=>#oktcuT_LSJiDV;-R1AFm;9?61pU!ncDGI&pf1 zkk089uB2Qlo*>cFt(5$bZLkVlNr&Mb6G&hM@@GR-wg*S|QyN<8kniLCTo{erfK*nX zSEiBM0Lw%5ugI!bv-YWzujrkI{#=BHS`;DVz$A!V7S&PGz0 zLX#KKWw6rjn{;&F!vRgD4%rDQx5<>MZ%LaBgUFamn>+{!x5=!t#i&=Nk+jKTT?#!b zjf-THgd9jmIeY0CMB|Kd{(@7RiV<@DNv9R<)bnjCDE*RwH28Gm(R3O(*klL`x|mTQ z?V>5wlKHTjbre}u{Cx`@tw3QXD=AmY2AD3|Oig;ioF!yx{QP|q`DwYpQ*K?nFr~bv z4a!?Q<*CZpZObBbH_pBGuC0lN3K8^7(NgQ$hS< zXemCn#*bk{%`OgKzI>&Mw?m_Xarg+)>FZP|dpQ`N`S;>;8ntv3NwGibEBlI-B%e+4 zXDFFZzEZ7PPf~nep8t)=5l<`aWJ=P08>G}%pS(f``e*+_LhD3lHCl-EYY0%ahUlOH zbl=sWRRu8~+cAtVWCJEqG+W|VkeL43RRt0k97>T56v5?v07d8;_=>z#45UGK!@X3N zBjl>AJ%T=A!%PLOE<;Xcrcz6%k}a+QsTY^Z>S1#e`Ou z;nMMn`4@`d`b#UOvbOT|)!sta$`8rnAXB*Zj6y=Mf94gk9)+|PQ6W);J5b12$2)Y| z@oPFzA>lRTOr=y6*V5tbe^By6Hp2(NwxY4ZfCvmVvq!lF8ZI_{+O9Y3H0 zRR>;|j-o`EhcdD}8Op;q(B#86r1tNS8EU%IPp6j0zGU zFM;E!AWa=Z=(Hn-Lk%6MAoo##InQ5^&B{81r?d6~nw1Wv!F`9x_lb=BUn8AvraYwX z!;yanvP8jKk>$vaA0f9JCAXpE5A&x#x8s$y1G%BvJEYo}3~_7UW3u~0wrsEWvK@|kc-DLBLDXaZUswe$RPB+l&p;Rtus#cVFHHwKs>_#ys zcDzQX9Y3Q34HI}>x{wm#hT+eYdgmg(A+&#wv{(=XOjgm1ETPv|d09?DmV1!pC{pY} zmeV?B(rL#8I#8DIy7Uz~$89Jh%QK-w7o90x)I6O=C5f6$o|CfW-D^p*y0egX3q{dI zv*6-5iX3?>GTuZ5nbYws3i2op_duwQ+DJLvi(pei$}tj5WOIl zjDA8 zh}8IXm#7g-yZzv|GE1vx(3qr~th6Iyn(v1Bf2gDL73*D^da2E^v&whIRc$23vMeto zZn^h-xo+4#lQz7_=nhlh(}2RGf9_ICZ5&35nRIgOb990_Hc_+Yg*qOf(<4!x9Te}? z-0^EVJ??LG*hBIB-|3ECWWVv>BzTEZAK#lw^`P1HxZM?jn^hSAdDlWwix;ANv6(oOo$q! zynW*Hwi$Kezm&Y;C$psCcA7-TzbXxP9OcCgcN?7?tH}xt7v7xSa2qM^Z@9-Op4M;$ zhnt4G%J|=HLVv?ml6N$RI?>;79VA7=HIX#gaI=l&)5gn7NpERY-Dv!H4R;Wg!Zh3+ zK5thWZzEonI=7lIz2V;Wc}q21!JCwrz@3gd5!}weMfiGI>l1umsW)(MDHMAlYtIO~MNDWbe>z;Mjc6nqVP5_IO~MLDWcY`_Z&%9g5VU_@guyeXm?+pjGqk$^Wvl&6CrWo-p_vrf31e>+I7USK!t1iL9BYb&Umbwb?~ zQQlTSH|qqtDIygF32Ym&H0y+=IcXmJIop8JtP_-`hyp>dwzh$!StlgTW7t%ma$zO? 
z*qEW23u(0CDeM$5GlwuU(}=87fXp1i$V?;3I|andAxz9PBIgtUGlwuR(})76fR{Oh zd6`BOJ_WeUA&kp3qSjMD%N)YAOcCW3&8(~?5z{h7WX-&RCb1z*%QQx(TtZK*Ll~B6 zJQcM9mN`^lnX>=d3Rvb)fn^#=4|Z)^-yA}z1ePhYTT%j8X1c^_&6EI^>9-MBX1WBh z%ybE0nduV1GDV^;r;%c*;DfXwp%4AD^8PU_^DnfpOM_+hB3NcGKPp znMDH2ED~5|k-##G1eRGOu*@QXWflo6vq)f>MFPt#5?E%Dz%q*jmRTgQ%p!qhmIy4f zL|~aE0?RBBSZ0a9GD`%OSt78^5`krw2rRQiV3{QX%PbLCW{JQuO9Yl#BCyO7fn}Bm zEVD#lnI!_tED=~{iNG>T1eRGMu*?#HWtIpmvqWH-B?8MV5m;u4z%ok&mRTaO%o2fR zmIy4fL|~ca0?RBHSZ2AvGAjj^St+p0N`Ylo3M{iyV40Ny%d8YwW~IO~D+QKWDX`2+ zfn`<-EVEi*nbiWztQJ^iwZJl~1(sPYu*_4YJp`|3oNr*V42kd%d8eyX0^aFs|A)> zEwIdLfn`<;EVEi*nbiWztQJ^iwZJl~1(sPYu*_4YJp`|3oNr*V42kd%d9rA%xVM6 ztj-0?Z0HFr)6Rfp+P2|C`Cyqg-Ie}tz%qmXyI`5Y{}z@R+!t78a9?1VK?BPSc7|mJ zyMSc|Ghvy*EU?TPfo0YREVD*nnKc5-tPxmdjleQ%vcfVO4J?zwY_QCMDOhGL!!nPd z2XX_;wEl0yG7EFSG7EbK%PhDOhH4&taLxxnY^b zxnY^bUBEJnyMSdD?*lBeI2SClI14PZxC>ZjaV}V9aV}V9@xH+_i@Swo7Uza#7I%VW z7UzOx7H5HF7IzQJEbbPTS==owv$zXbX7RqjGK+J-GK+k$%%ZMgnML~!%Ph_X%Pj5! zmRZ~_EVH;V#Gmrtx3}nDE0~xT)Kn5%`kO9jKWWq8_Q?SfF?Ha!( zNx?E*A1u=qSf-nTWx76Crke)KbhE%R%LJBLmIli#>k^h(Ah67WG+1UbCa}zcG+1Ub zW?-2CA1pH%MARV!ZJ%UV40;|!ZJ&|fMu3;0n04S4$Ca<0+w0Y zJuI`dTUcgk4p?Ss&tRFQsykR_X}7S<(#H&uXCu8ZaJzp$2vnmaiS(OIMtV)ArR;9r* ztI}ba;VxmBRhh8NsvNM)s$8(ls$8(ls%~MKRo%lftFpr~tFpr~tGa__y4hiwZWpjj zHzzF9?G~2l=7MFqxnP-YE?B0U6PD@bgk`!}V3}?wSf<-USf<-ESf<-OEYs~7EYr;e z%XGVdWxCzNGTof8Og9%S)9n_P>2?dtbbAKNbi0LRy18JPZZ24++bt~9%>m1FbHFm) zyYRCkKlC!}!7|+(uuL}xEYs~7EYtn$V0s-US^Hp_Zg;Ruw>wy-+Z`;^-4|Gbb?_=`e2#SNlBh$1}rn$ zB`h=A9V|229V|22BUom%TUcf^2P`w16P6kMZ(y1A*hh^H`!!qseVVQRKu*`T*VVUu)uuMBUEYr>g%Zzsg%Zzsg%Zzsq z%Z&FBmKpCJmKooh(OuK7Xrrj+p)9x0QX?F|Dv~$5S?Od=- zI~Od|&IQY~bHOt0T(C?#7cA4x1j%d~UBGUGX6nRZvO zOgk4W)9x0QX?F+9w7Y|4+TFr3?QUV2cDJxhyIWXhyjxgid>>$$@t(ml?QUV2@t(ml z?VPa8cz3W&dmmt#@t(ml?QUV2cF$m$b`N2hcF$m$_P)Y0IBOSb%JGvx`1VdGGLjZ z&ali-XIN&aGb}Td3ziuYSmt5$(KCImnhDDcWr1aex`Jhfx`Jhfx`$H#b>)B{*%s2f;j=zj{9**^u#?B7#ZX8#FrGnf+9``rI?Z~Tvx)cXc zCKr7N#?a8$s4&uQfR1YHU7oxmsp!v&7d-_POj2MAKpplm6c*5&k*Dd)eTHjW0UPDh zWdkU!R*Q83F+VknOyXn-Lk^^)oC-1w%4r0rfK7&2x?}>y&kPf0X(^a~h#BhPk|lJ0 z7HEa6L5wtD9WFVQ&Y6dp(kxB!5SLs>YA2jbh?w@NgFqoJd4!~lLQJ`EArlnhl6UBo zQHZQ&7t4S`jI~1}z^5hup;Ja75*KQtvIT{>q@2hXj6&q3T%qMFD91*UKNC)tisdWR z%2^}@3eo;Xs)=}7X(v;XRa;4lF4|$>Kc}0J*ktW!Ht02RuY%~2)elpQ!CO0x*|R46 zAt8p=FnBA5A@nYy4Bp~s!Eyo+9pR!c<9kesw^A9c{fgW&cIFw&O{qVqRHNewrtS~8u44B}#Q#L_Ac5Z96uNXj5Cwn0pDU)X=xO6SWv zKBYQSrXGlE$+aZLNLQQyF};i^kNtT>2Qj*js&AveA3=&wOcmz8PL-p7gQyLTK9>~j zIJt~Yen=os=rq^~TH3OnH8ty z$74~;y(UH6G)l6@UmDLz{Rf$zk3)@<(q!tiTxwUM_Ufn`CQJLTFj+kDYiYzSr04d( z6DMf@f6>Xh8>zccvW>oao=q~e*1g8Xqb5vm|J|AO>{x{otoy)t+>`RS@g?I?5!N-` zOOug!z4)|v8AsLNasdQu#zlx^C|8TA(lisMd)%Grv7{a4;`X0sQe1Arr00{2=cN9y zOwZi@Nog{5TKgZxxu_+Nkt^IJGA@w{$W;*cgrhDe;v>3gkICiQH>8W6As0vgkQ86z za*JhnCEtg9e%lwc?MOP1hYrUY*q z7k@BedVLDeTw_WUwZlAc@FFB8#%%T&S zZxKtIKvC(HchCX%@w^(b;u+C*M?C6-X-Zv8MjK5DCcG(4^8#gc^wp%W)>4ZMC#$0= zauAg$ujZH&=#=|tH^uXtxyO0R`y{i(XHe>6|3qVItMR`hS=0B-`PP%Q{uOM-d;A|Y*XhF6Q=h<;FMG^ zY{E3oV8Wy73EqH)JIEwzeoGpz;YibPtLS9iF$4%i!?ltVf5YvhxWD1v_GQBNibp9m zH{4?5e?_uzzJ@!Uyrbd5r}`W2Rg$9N4kBr?;ee8a<(bCIg-LH|4Y$+y@fxn6!)v%b zK5qq7cX<1|@uE!K(;H6h$kuQLQz-j)F-{2$stW-wv;mhV7p z<@&*d(<#niLbKI-tZelLSq?Da|KIys3?|$HFkw)@gkPbA#JmKUFeqR`5m{RRCJYLg zP(*oK0VWIzm{3H{7JvzZ0wxquU<<&6K>-trD7+P5!k~Z&Mbx?lV8Wn)2}QKyWGq>O z0wxrZ(kBB<7!)v}ky$4LOc)d}p^@dC3@~9(z=THToD48wP{4#n7C0GT!k~Z&jVydJ zz=S~o6B=3T7JvzZ0wxsM9a{h<3<{V~L^DnXm@p_{LJ=7-;TC`ig90WzoM@HQ#9II+ z3<{V~MAjC734;PA6j9z5fC+;FCKQo?3AYlMFeqTc+u5J96=1@kfC)t;V8X2c69xrL z_=aS?4Pe4SX<))_022;M0~2lom~c=Um~b1wgoDz+gxdfn9Fzto+y*e=pfoVyHh>8S 
zrGW{z0ZcfkGnjBtCoth4KbUYEz=VT3g9#fufe9NsfeE|WA!7_-V<#{nNix8MBynhY z-~$ShBm+#?*a=KXk_<2*Nix8MB=La>N77B$QsW<|?EyXU6QzD(Zdt7+WI*8vdH`lX zA>BRI;zpWz2~cQ1nun;$d!>j322kjHdm)MZfI{cBwUCt34bv$o7*;^*jZLdn9_{Xo zM8wG-H4;IuMe8|Od>#kO?&M(kOB}5FBZA%?6%K-<#&fWKVsFGYtmI(h4i1j}HV4Pu z!NI1tIN1Cp2PYO6BEd;<4z^9?VEb|oPPvGK9d~eW+Alfy)?N-y|C)m{%4n=Qz0VxM z!C8lLaP~15Tf{GdVbN4F?_DI5=t-2S@*qgLO}GaLk(=tpA#W z4dK2>etZiDo7y?pd?p7c{(yrmKj&cUpEx-A8xFSN56n7!w#PU)buo_>$ z91hO9hJ&;3<>0)xI5__U4lYnGyxbk+;GR|v?md!&pPb3TeLqA{Huhx>#x)W;(CJO{g$ad7#m99(e?2UkA8!M9)I;5(mla8(&CkDQ9D2XS!CXb!Gj z!NGNBb8!8w9PECDgYW*8gByC$jn1jKv4Mk|rgHGT)g1ibEDmnDnS2;` z8l<}ue-_lKxN9~CcXx1b&zT(D`&|xx@(2g_y}`l#ie@yY;(Bk3^TWKb$5f0&E^bq|; z!b>TkMw%LM2=Pb$=-Li;>v zb*~WaZu)v!l@%P#)pD1TfDbB!0jQ5@CEbRlR5CLn#em|f&Vi>8Zi#w*n9QEz6f$sO zroy;S>I3O&DNsDlwToO-s25dNuc0<5n8c^9Lju;h$Vx?=I;Vt0^}YJheLAloid*c^ z<@+nAuswDlP059;;fPu+e%zX4RN)$l<*6ucbwigot3ct=Vz_Q0`ST2qk(i^Rm$2b* zGCWZXw?N_%t32wJueKpzU=O5Zkf6Q$#GS7d64kbI`KEIqC zm0ZH7I}&udi%-vhEiXs%zeUpW170Sx{If_}KKl(i9Z*Q^gccom03B@AtB|Vh6y&d^ z@gCZVJQ4$2spkK-5@cBC45ecO)*Mht1ELjGcrnSeOTg*EEf#dzg@jV9pkPq=B&hGE zv0r$h=5y)pUb}Gz$qQq_qVI?LQZ1>aE3tynk3fDUy~Ti6kz+}tucDQ7p=0~{k~*sKb}Hr7ru9Ar#XVKt<`q~?66ZckbxCG4ay5BN#O~$jA}^Cut)Mb=WG)68bn;~ zy! zn3qUZVNIvcfH^pXrQ8U^AX^b8*)(5=xe!xn(JWys{`3hjneFQwHj&m1V_m5GQu`M! z&}%87bf4b>OGf?3II?)h3>#_dyCHuU*W5{_#gL^7v9xKO^8lUKqS?Z4!uY6>WW3R| z7aK!I40u{VzD?u3_wJ!sb?TL?j#s^zLx_t_yEv+$5vVXodNYR*7qqBE-|xi{Bi(nP3j~`1Gr)ylC zo!jgMSgb|6FVlQ31Btt)*X=Z)=jDa}3Y)&jr(N^8-{xw%=mL3FSc~f#EG@tx#GqAS zth4pK)SiWvwy%BiZ#_tz14Fs;Fxuv+DiB<(_VO(ya;XsTEP+Vc6iNMdOIK`DF+7w7oj}%v#=;**n&{msh zYhWHtm&N@}bXwpWL~Bj-punxjH)x`Z0*N{&Wc!^uFbEAcS>xKYquj3^%U-3*Xy&Zi_2SOYQn9?)`K^edrRXn1$FB9o0;+jFm8$s8CUkL}#zIx`eKB|`zF#_B#p)=l z;s>P9o!B!}tl~$c`~fP+>r{q12dy#yUFF>$Aw~HH;5pQjx+;GhWnZ)ityZ<1RC?0@ zii1Ol7$w4G6~c3>{x@Y+^s3w;SQ*B1tNu@n#BZ1>P~>=!)o;?1K+&6ic?eIo8h_Tk z=r?{hc+%C28c6&A_qWMA`~~D6QPUMY=zFa9?d?CCtS@ zZ>Zt=)ZS!r8@G7~dk}ga-EWH?Kp)HUT7|JEp&zGdzi5eR*i=2j*s0LFX>1p*HTm%H zfOjtT2o+uhlXnJ@$&LP^pu#S6)H~;sgD|!^^w}uSt;Q~862|6-z8?7;oi!iv6vpO= z9y^T8Zuk30Icmbb$p=)K74C>w2TexqXcQPz$iL|M%pD^!C)Vdcij9|8AQqr5aVJZx zDt0Z}d5Xlsu{H2ARbq+QKj3AW#F}CS@G@OuqhkwbnsH}HtTi@|n#7$cv1zdjsju8w zm^ZBGL9t5^n=P?;?VX-&hbG5`4$L@pABP6yYHWWUOk=WAMMew;^ zV#`zryHR>Sn%~^*sFzty^Uxly-)pmky3IZNb_Pyw-43}XjTPk{D4i=TvCew;x!xoj84=AxcN zc?4cF>9KY-+BqB%c)#2_s;Gj#(B{k*?QgY^g+B!QocEW8+2<#baE9Kwq(vsmY;5jtuD zv~?J}pn5FUMZ=9<`V*bY#^LK>7N&WKSk_V`NXW_eD5zym*g@5IP>%*y`aNNhB^@Z9 zu*0g`&}grBZc*&J>RTv9;I}@d3S%Qy|79S_PckhcrKBPxn+2P+`Zsj-2W~JX+!>}x zv3IL4qY4K;H;yqHOg~~rSN{vWaav}3o0%P(z4~zU$Q@l+U>{h2h!WC`o%<}kgvJr} ziS-*ObO?+#9l#e44_I#d5O$*V zuWQNTB9um!$!9LkfzMQS)F`hcUylD zA>CfNno~Z**z(qwU_^~h+j&TKsa)zIY>Df?lb^t6DHke)EpmMtO_hO5GjipzMzbCa z4x;%v@SThz8v|^s>z`w|o|n}__`5LpHVVJi-<%Np#&ey6I)$(;ug}8x`eUk?JTz7K zZRlo?CSAgk*_oRGV!D8kdDx~t(Fen1lqC<&DU%R(_w^DeKgld9`v_y-U$26}V6&+7 zQk(e(uLSgV47(<4EoG}^zXHj{B80aEx)im&Zz&rfE3rach4CIi-vEO_W(L3&q{2@^ ze)L$9_c3zLoDY%`?;P|g0J2-)O@%%RqovYs!DCx|;3bCs zH!k8$zDtAVOT5<5D{$>zo2CfiMTg!TBE=rbl*`O}GahqQ~J% z`K3=M>F}OJUyRH?^yxTV81GK>>QSWYW$NH%C}T2=w<>xe+PKA%y%DyMUdF2yy$s6l zn++OIX?l4MEY3paKQbFM85SdXi+UvQd-u8YK-$3r8mv#aZ_OBz{+q4#e!!)n5nU=U{Oo6 zNIi21F=CQ{#c^jqcOxC@4TsQ|noH55SJFIEZ#aZ0Ew0`UD&|P%R!Uuq(>gvqpd?H$ zT+puJ{YoLGRiz&DJLI$BQCfMf)Mc=sM*XKJBMu?U73>wvX!lfu&C5^O*a0t)3g&Cpj}5JH{x*!Ib$_p9yEEh zD2)h*5Ph8V>hfUWCxMAuVYid!tBA)Tf96~l30uvn6#49}$k3+~N-a$=#&@=HkgskNnNu~b) zle!Yxg!4FrGDf7ll#!|We_%M3#$(Lm5K5m;(ofMP8xaoS2p&UK@hBNzP4jRB973@h zb6`>!BMzZt(l1OIj5*X(5Qk6{h|vRBMhDMlPZao}~SIH-!iPdWK3PbnNif6BHhy4~w`S~o<5Ls*;d%X5UM6b>PNSdh%KZ9bG&l~PLK z5T>Qv;3QEf43FQN5rL^yVRY_2C=8YVvPkv2v4& 
z2#2tfi^*Rn4TMA3$wOOsAbEJ3ZiNxy5c&tE4DN}Zg>VS{7BaY3dlteW^jpZ_J_{D7 z(IF-rLcfLNa+hZz9KuWs-@MWGhG#1rLcgsyF{rk$l4cR%5N4XGwtC!>xmdy>^joZ@ zap`jDr?kRsPZhOMl0#^EzkqDUu7q|Y-85p%A@uu@s$G~ghzN%;)52f1dy=*>;Sl<5 zeN}rcX%-Ob7*c)V5c;N0zM0Da zXqy4EVJA^535PJNOVzf{GZPMBrZ?4gnP(v!!b}TULA3o8X7>-I{Dec8H9uFH{O_Kv za0t7$^|$)~8n2u~OgMzuQ~4Sn3nwX(a0t6jpxRFNY=uLZ-PYF)w|fbML+DSyBashM z-?u$m;SgrG^_9G6aI)mWA?!MVYCFKQ6%Ju`TVKi7dkKU?m_0#iS=V;Gmq0j#*%NT_ zaZqi0yad7_%$~q&CDo>BGMAnb4q^5LSO>^rbV4%{yhWoeCLBT}pyhSy4wo7@ay?8~ zgA{rKr4SBb7b#MmF!g+-*iFwiqb#cJZ_xg|kgg!%5TYpFDx;Jh;B^f>F+_wzn5G;I<>muPDI7vP z1bQhKL3uVUm}0^q^cT>pEmupmotm_b2#3&bi=k}HzVDeOdhMV`HouwYQnkI5G>Zv` z&~Jvjl=1dC%+A7!PB?^qGhe3RVa6;b974aDm#J|%HEA0W4q;YX)wVuq784Gk-^^FQ z%VCD!R7ZC7`a$pX>iP=!c+xB;974ah%mTijw2cUdFsrRYs>k2>oWh09$FD7jx!XeCROILl;EG8U6znQOq)%#NcV|3q)2#0WNnBuZ5=P_>v{y3O@_orkg z975jZHfE@YYFnGMhzW<#Z{bZ<=01I`XDb}Su5JBg_^FplIE4OGXe8D4p=Ti+Lcc|- z3~W1%YZDU=Vb^w2eN~Irt8-#h(^|Apog>HfbUxCNBa|aqe!wYodL*f;BA3PR5+$+= z>G8JrqGF0&E-@$eXL{QkxuStAt!O|UFrJ~yo8$%a|q!;>*=zQ!5l*DW9YG;;kXI=Sj-`mN3XdsR%1))J+A&G8AO;v z=+})K2;F&fjYSNH5L;_njT;a7-v^T1a0rcjDdc!%5iuM>mP;Fs-2&z77-`HQ#BEOt zO;wG(4mt-9hs+`L>G*DFZi)DSO^4~xFeV(rly?~>{iyBKxV^9{L?H}^P|R34jOl35U?9kn&-0(a)jKr084oBjP$|Aqi?R z8b&XqZL<1&+8+(J%_tygkU4}0aSy5)rp}E z%@?V(x^fh490!F%hEvPSZ2svrlHm=#&^Pp>_^B{8w<*H+y)Nt%=Fc?V> zu0iGyb~0#t76!kqA_L(NE~Y`HCGl)MAAKhrLN4N*&tc82%N#zh)3`2R}!+;eIA=;2utK5_a=?`gc2zngCtc^)?K=Nv)DT67it zB7~$Pof9FzX&s-=tEO>}(c%Q})~yfI(lhuZzYpahiSI~)!Xd<@NrPwND<~g#h{qv> z3+;yJsOEnNHI1(za|nH!1>Yn!;1IHcuJQh8aN!W*I@V=_;0uNO7MYK9;uNjLX&s-= zduE(cs9hrX8+ysnj}m{RWja@O>rcrz_*V@Z`e~-!`UTBB!H-RC0(R?in#qEn7=4x9 z`al)MKh^mCGW}4;uEh7+LE#X-PCjsH4S-AG5GF+vFf9m&Fey5@l0?EGOo}QoF$jk+ zDf$>QgK!9wqD`oQa0uU#948JQOF4f?Yl)!8A>>P6O`HPFJi7M;Jr1F0<{SYHCY2y_ z2wCcWK@R1`}k3nJd-}Us&AvAjG4>jizsQwAp%pqizxi;tg+Dpb9!c?|u z;y! 
z5c+IBBCVQu8f+dz-*_BCpAANun)p4~^rlx7L61WyHc-1igP((RIr8y1grbK^nvcdN zB6ZF$=vcdmuB8F!OgK1L4M0)w|6nrs3Fobtb<8)enRwUlZ8 zGHRWw=i*f3I;WA2wLfya-$V6+ol_9PM_2kZn((UlH^kcIU%ix)lz#dJ-A69zK9pZZ zu?`DXpjJ$7%929K!dIke}RB2dWIj^PZ;J273oie5=(uv6X%@y^VkPN>h(jHH^xk zpP`12gF^^Yt*WMbLU0IK=)OoQ)pi6l{isCr*9j(&PuXmKjgz&ZPWp1{n7D8VWjSG# zyP>>=9@*o4~JCd0NTJf(05|ARW4jCo3K`#mi1B+@Ze&ih*4d^O)re3(O+ zDp!p@x*AcY%5n5j&|?mrO&RE#9VW#ECM0~_qWj4$Q{{FV;Z-T+VfckPeREX)4jJv7 zOfC-891y(_BJI9UNB1)vP-3(N{Ec+}O2=B1ex&myPX9*Q#Z9eBJwY+G1k60d^itx& zRg9psrJLwvES)U-ZX3xf*j+6$sYM>O$m8(SNdGmR>^4Qb%Y=KWj>{cVRv$$tWppwZ zC;jQNf91Sjw?;ArDvI!@qW9i6Cx_vwttIeBUY z$T>w9;oAnSS6oNkR&=pMa;R2}~HdfeBjg+rh1Y!qcLc~eHA2oYGUo`8b83W~V`PCNK+dTVa*mOOj{rHRM#wov z*4hqoPK}UrM0Q6z$T>Aa&Jod!BS6ln5ps@*@^me}qi+W}r$)#*-(aevs}~y&HA2o2 zk<|`zPK}UrM3mPKa!!qqb3~+S!3kJHO_6hqD7-dB&M~6a zwJCCri1LbNRMygxlF2zDvS!>slbDcmjM3T@ImdV^8vjpP3mI~btj@;YT0#;b=NL&3 z)^y|Fqy>l}=g91qlz^O*F2P+RT>^5BUn1n3bP32g=@O80(j_40h(x_rua%`HJxj|0 zee&nZ`^Utb;;RiYhn_;1m=h#oPVmt@M1`0WG{l_xZ_g%?pO{ns+Hyz?F{hZC55$~0 z<%C8e;)F)^Ll9mxi-W~Sa!Kx<_L|3=ckawbMHqaf>iLM2K!HFJ;&+eRP z$88)O#kiR0(Tt0UuKODck13=jwG&-Ggo6!pIXM1k4mMrN!RC87IPor`&|ys>q8F)PW1dL4lWqR!QHbs zxMu?g_g=}tPwwI1zBdsJ8v7p(#^JS?GpJ=Vg29tIDiE}7X71%+mR*T-vj=c6=Kv1o9*khg6@1P7OY%E1*S^n~vW zy>cW6-(JAMcaG)Ys*5?e`Uf0bb1w(iVt>aOdR-pP=g!dU2Xe4`4hP>onu8lo=itWg zb8ypB9DHvt2S2cDknWZ^2e-}Q;798@xcz(%?!1|UyI$bn?)Nyj2cIW8L+`EQ;3s1_ zxbF}S?%&SA1G_kQ@LmocdX0mJ@nG%@eS`rqLmzFWDZ&~0SUU$#oXx?LH*@e5e(viG zeflpPJX0J*?AbU6&rRjv`3??VIG=+Tf5gGhp5fr--*fP*uQ>QkWgU{g-ORyzb1CRO zv{1jpVISgbAYP;s6gtB?W^i!S@f@uGkb@0{Vdys2b8zff4vw48!KPC<*nBkyC*ID% zNzZby?OhJG>j;vc66IjWWDZU{l7nw;=HT?pI5^`69Gv+A2WS0-gR@KOk^I~N9Go|e zgBP#ilD$+#_Zer{%iB=5VS5I1?3F7y_KP)a`Kxbn@ape4ciYLDQ`+{DvA%7xHh_(|9_6%riWl>9-X1S3Qgj2 z2JMtHXs292JMWPn2JMXeUjf<)(OsHBJ0tfIv@?qmGH7RH4?sJ|lQDyKM&<(TRQ!g_ z7_?J40PQ={wW_c|I#NJ8$HhrTR2@w7BL%dxMnrudr-TgJ841u17};7?GiYZdKszk( zI>p&<-aBOB?6=?p4i5eq2TMQUU`-J%k(~XGOmNUKo`MRH+*;_u&@~L&nR#V#;D<*j zMd8V`f}_zvADLfAHVoV8zl6^8NfYTaf`ytgA9_ATCG)c3I9IjXVDSRo*ZWMG(yY`? zQ((b%ZrOLpSx|35@?$kJ&gMZC52xqkOUOvqJp-E-x*+=6*3+~=}(jS3rKvT6Iw zrW6mIOiO^G#YGF-sU}6eRv=N~u4S~&V+@gd9x7G-?y)2<|NS{QF5gba!t>9gW5rwK zVe_eUR`bLbdSWlSSeGuIX|BjGjvkR~AaqG)gG^ULV4j3#{DcLAO02x&^Lp&X~9rTks);;L7njj%d? 
z({ZZgWj%+CXAXqjoj21_g>@LoAJQ7Gv-qG!`|JX0#quG$=~#H*U39F76;f?4p|hHQ z{mL}gm&?!qG=Z7s>U|i;b2nFC#^rcT)_yn@!Cgd0cNQh5>Fro*O{y6ylHIKe-D^>l zQ*NYV`SCx+an$!AIrl*v$L_*0aUSG_J8}MxAJZ{%EEQTmPIuPkn|9Ek)9!ZaLKV3b z;(v^!d)v~96gSf=&#^F5ksrh0DjLMYH}ZwgQ?Jawk*A@H?@!w$!$pU5DzX>iFW~dI z44*3UALuS^p~>WAbdTmvlAfxs>qCMG337LXbsY^5?kRUH=KGJvlH=$&7=R;T9p^}f zkh6s<-a^VX6Ud=QxGjeAz`5PXo*aizvQdYZtkw$h(k( z3l^Egr|x?sV4cgZ2Rn_kLvzQn}RdknNuuj~7b>hVu{xFLHtP?LY%M1bQ#9b2=uui<6i3(UJ zUT&fS)`?e`sDO3il_n}+op_ar3RovzZK49!iT5{A0qewTO;o@-^yk~r(pFT!I`NP` zfbP6jRKPm%h@Qkz0qex0rbYtRi4QbU0qevYbU$i{Bv>atL}ToDU>*94^Ivh?fOX=- z^mOv=fpy}|`Y?_guui;1hsd2BHDI0iWR3pyz&i0MCcaz1I`OF{ZooS6X(n#KI`Qdz z+wj0T@tItD53CcP&0rHdngr{_4n)5W!^iXW?fP@dy9q0Sq09GEPGa_chx7UVDlZ{m zoy5E-Nf@k?m=8nM46sgOju`~a0P7?cj3W)eI>SNKz@Q(|AGK0V_fVWEbOZZ01GF=+ zR$5av1GF71 zyxKsD@(sr`yC*fEo!BDOyzfP%V$e6b^a3&5bA*l+yi*cD*VPV52h|D^kYkOQ<6d&uX;g_sJ4ibS!6z@VL2 zKg%~g!Mju-g53oM?Zm$1`uV27D7G9Jv=eK$WDZ29@Ej!vZi6VcBp9?4Q#!+kWD>=0 z1%r0z$G)kWxwEODG9^YogThbF?1(UECsylg4b>N0Bn;Y#0kk7?H}*>yv=e*RIP^6dc2O9#6T3#v=h6H+dP8(8V2pe9!T}HiemqUK|8S}reRa{h+PN}eQ1%E256^ow#4Q& z1GLjPS7P&<0orMtC$R<10PQp`kl4Y^0PQp`l-R;%fOZ;}O6-tkfOZ;}N$k*OfOZ;J zNNiE_HHfW}*kR28?KG~I*y3h@b{db6*plX<@Og~HmNo;l)3{z@%TxqAcMRHT+>Uz5 zYDI?cXa?;x?l>JLjSf1B@t#8t7*tHBVAl+pJ%Ub~Cet<^@H<1!ZXQ%e(sM8v&_TfO z3^`X~dCkD@3^`9?PV@6*Gvs`U6*U9DGvoq^1)7218FHb-`ZYJwqsfqqBv#c7{LYYz zC05%E{LYY_5(_s2zcb{r*)&4wv8x&Qoguq0l7Qba&`|?DW%lTSoRGOZ2Mk+4e$%-; zqnXPypYo}@i^P0T1~3|V6ehs)40!@d?Lt5o3wA;yZ$gP56%Khw zln{^-k;oTN0?#w#Yv~3ENclAd4M-mCPbnGCGeq-JNV|}bNrZw1c7lyl#`6q$pGzVZ z5p1L~o@dBk{9Pi( zIq*D*=PZ#!LIuQ%Gx{#H!1E;Dv-sS_QP#i4?$0DJaMCFDa+%eWc+EJ2)@93RBa~qE zBtA0J8ziD^boQh323SY21CMdt8^et4Nv!uP;fZEI>=!e(CvjyKEw+(0 zuswy*q+4CMHYP>R>1Zo4&f!8++}2bQ_#Skuy@Vap2SMOCoI`~*+#Lq&DfsA>zx}HJKl`#N&MEQR8ee! zGqxvjl4%hxB^S^%8n(w7+mpD#m~g)sEq2Wr+mra*IL0V3U5dSQ#`Yvm%WQ9RpTIUd zV|x;JbYX!Vc*gc5F3ws);|M$SjO|H0l37Y)fz5ly_9V{nTgYtZHRf z1jhCx&g$e7?-dx^lh`X)dCF%LFB=%!lNfD_GQCS(o&q9x8Nt||#AhiNDuS02jO|HW znvpAyH5I{Y3&!>&zLQa8V}KVLjO|IBm(@eG4*OZa_9WK&n-gN+cy4r1rwCq%Ft#W0 z$5b)7300Ig#(?cfBrKV{xfvj)3kaD{oI z4?jUENAZr0sX>YFYbgd7J2iHqc>TuIpu~?f7G<4A*GRmX`x~vwoTm0>(B_*~%i2jD z0NQ-h8j0npk*Lf+$uQ7#v>2|FWtA~JMq-W{`7{~+n+;DC!!0Nqt^UO0NfL9^uz{_V z-M_6@$iC5eosI|M{U9=wy)bNusi+-Bv4@c97Bfg7p%lF-7$2>N8i@C%3$aIoeAGJya5G3ATF^*zRzpqR&DGwtLeq^UNuQe3#|a$xZb(Izr3SFv_K(n&ixOZ|bn& z+P7R#QM~+QwtLeF{;AqqBd93ei!#o==@^q7!(Nthd|md%D^td~Hv#9K@l?+>9m_cP zroTxGM)6jaaqdlrS<*&cBGnh~S{diw1e`lIbyV~z$W~A#o9G9mJp8zT5K{-@Rv5)g zS$zUbX8ZcXg^8IV@xGRE?oA8ya!P30!DoS2xr}pfddCbAXUM+HI~q$rEpa8MRJ-Ia01kuga!A zcyr9C^`_I!HJNFF*T{@oZ(641*5g^IC|*%BW4)P?llln6&PVFo&6%;6KS$)m9Oph-uO-3@$xaOo^Mrz=*gNqD<)GfNLGJ=l|8L1#Z^ z>EcqP6WhaRF?tWA&iieTl9bsL&L=Br-Xt<9Dc$9CP%XcOZq2dE`5Zrwl{Xa781=uQ zUbjg(U+mx5SzQ~6ks2RII{)5AP;9MM~chEu;Qgf--w?l_dshvFi z;&6JikbZGVDeBLK)j}$OcXRB9X1yF|*(C<^m zJ}VT=#nZQ{{}fi|pv?XBoC9SCQoHNWWc9^#)SuEF%Bj)6oS3XNH)s7&=!!;=u1?1& zoir%YN#eCLB;-sd^S8v8`3N3+bFz`RWTO<%i4`h zUNX*Ls@!m*%CTYcja{RI-;7LkMpCi{oHnc>?+wpTR*lZbbe#0!Sf%ho6lcWfnfQIO zB`7(FwVnl(oK>n}NkvLS$<^J6q5vgl2}%yFN8j=6K}uEkjHeZpoYB4st?Y2ty$chd zL{lH-h3-708+lCuOQ zhmK8pl@Wq5rDmTGJD}t&4<*MH6B`~%Y*=?U41tof1SQwmA&gZWgL8nAvjioFe(*Au zJimmi3u#nZ9!ic!B(yRtXV=oU7^CDY4<(mrG9D&C$ypvsj!n4CJk!ix1rwm;EDt5e zCTK(#2Q~Xl&%{H?vB{G#!9mTw*)#D_a%|!q)a(~M6AvZFTCR~)`g527C0F2~5UAMQo(ZAkvYE*B8GFsMBb1!jdHrfkKKD%arkO|~E2&Gl zN(F?H69-c8z- z2ujXxA-Ax5VF8q!B`7(JC6kM)|AnU%lpID;a=Hk83?)!VWptt{3+`j zXwqVooFyo^nfbmvr+7+1$>BG3$vo@Zp#)0K5|mt8${n6kP;zN0uZ0pQIZIG-a(j@O zul_+OvG%Y8C6}gr9ZK9+EJ4Znm2>|EB~Wq&Nt9ehKdZS_^d!V4RuUzbGEwy-pan|K z5|mt;_XSYC87A*Zl$C(Ef|AR$@XZ_bfA(wzCFi&G 
zCI(esG|Xgb2}&;0Ow~6hEeZrB=eO`C2sL*e%=XYWiIqgjN$+#rYL9~!E7Sr;$@zUq z)vik#Sb~zvwD4E$>7;Fepyd3vzN-BxX=VvZF4Iiamo%ADS%Q-DTllKR;2fajtRzZK zs>ZjolwJKYxMMp($#rrkRlUeF6O>$5Gk<*_OeQE0lw79-zDn;++FF8=%W6yOeR_W< zMHQ4>Co^BQhI_VxlJiZSd^0zX7fWCUl$<3fxvVZ#{dUhxP;!~xR6XN!*pZ;*GA(2U zQNIU{fReKWC6_fn`Ndj&ulH6)^=t(tm)+J^@*tj}xiJML*L4C_KgY8blw5XOU&&AP z5(r8zdxF%muKo@$fuQ8FC*b1apz8N}2?QmVJ%QIss=jP^vc7_n%bp;$7_DnV0-)py z1SN+Aw7gE;;Zg%fu7|qKNTDZC3PH(rks{Rz2VH{{K*<#_N)8P|8)sfa$|9`(IamQD zS0E_4gQ)?Mw?uA3PJn}&`yOoZdxnA}N=^!lvZ#8S78s0@vjioFqIh@SFqA;aS%Q*F zQyu{2<^xG7C^!W(9(h^P72@8kYbwh)Y4qWwllHrzXt` z1SRJ;^A+&>Fat`?5|o_Z%vZo)B+Uv0CFeKGEZ~<(TT4)KS#4E)nATyEv!LYsX1)SW zh1nRo=qy3W`OSOOp^yoCPK4H}e(neV73yX9-Hq zZ{{mtKYB)GvjRcM`OPv5I4)^x2}&-jt*Xc0(h!$|lJlGS3V1HefReKWCFeKu74Y7q zS%IMB{AQU2d@E^d2}&-jEnW3vOwI*@lJlGS3J8)2qvR|>$;t2aGxjJ{{VbSax6~4p z9PjqY9*w#8Zb@1c2ujXx;Z0S>_Q#&Bpyax?i6@56(07dVoo#YNY)kjDhaH+NuSb#KYUeL zSCxKA-mtwm5Y&JIX5GnV-$1bL3Yc|=AIWPy5UjfbX5GPo*7Ibig;{slXwkJy&;jet zV%D8J8|ni{rD`9ivNG$=V%D8s*Y*ZG_fv|gtZ4j2iBd%tUI4h`knVy>Y0}lru~QlVcn&?%Q$AMw#(oP ztUJrF?!=6hbMJ%_th<6F>&~aGeF<8y?kr~A`HG>YFzc>BSa&{!l#ka5^yd_*Nzu3F zSJ#bB0SOu~8m1Mr5=|$cHgH?o=7V+D!mK-N+SB*z^TE1nVb+~U^n9@HT9|bw5^FwK zcP-4i6G`5D{4A}7S$86F=7V+D!mK-y1m=Tv*TSqjk%Sk3b=ShIJCPXH-7K)~TIw_k zicRDyus=&Hy z5!T(rI(mn0YjrSKcd-d%!ZfxPV%-JdO2rm?GGg7a%#D&p#g6wB#JUp&Pdl+IJO#1t zQYqwxD&`;+d%!ax)}7A;p5#l`ZcWjb;_3=40*r01+)*aILov3`U?nbm= zcBt(?o|ItSwJ__Bla>ud3RT+z9awiQ%(|1Sj&)MQ+GQ{R>#l`acbyFCf!1NxU5l{p ziYOy3i5D{Ku0>dPT*Ut0hc!MQZDH135T$U@hw2b!-L)iHcZV?Rt|iI3tJ{l&VBNI{ z>ke&5^D;N(q0G8#@mP1_mD@4Zm^4@3jH{rGGNWG=(y;+u)##wWQ=J9~aN5YHi(aHS zMvK#Ox1RMdowPj3KO*8GiBCCOgmq^=<(z#w*-M9btUI{S?kjXuhh2+v8ec8Uy7Ork z-osJDSrJq93anY7Z^xl;UpJ@3Fy@jFK zyyYV;)48%|fpypNR}CAm?q-2?*YdHcO~9T7)?LddMqg#m0_(2jQ;k>VVBJ-Lu+k!| zyK~70E8sgaKm*oYi^sYXP5&y&j9GUr%(`Q#dn-AVpXu};?dh3yCwdGD zqhH|ZnRO?6^oQ!d0V@0gt%X^4tTNYT|MR?L%(_cut7hK<6<@X<>&~aHdlqW2?plO( zhpUYqOW8!t{t#NQ?pi$7og}^&RpKjQb}5<*th*MEb?39Wp0sLqGi<=RYw=ijJ{ycQ zHTzK5fOXg6vF^kMYWEcQIh0vJO_zUso$Hj(q7r}SW z%(~m3KjWdLEDAcX?)LvserC)%d^b&h-3hvnp?e4n*Lo&ccQbD)E+cmsQd&=8*4@loiU$m)4`7jy zw-!IMBjTY8jCm&OobvIgAcli2Omq`QC_=NFP z=X4S_plKVOi*PDZIbII|UDjlVZ^rLDj+c>Wxl2rEI{dGp;|@7S6a@eD}iiz6nJv86T1$48_Pt zDM4rhWjB7>QoVudHGcZ%Cz>CkltHQ8=jq4?WbhO+z^C$!&d+iBD>_uT)0BF;l@@Nb z|Dve=+41CNI!tVpiTE`>&Zm~zfGrI3RzI}Jy7 zHImle?HSA|rIH-xGdPM3-hhGpFl88ZrFf9cF%6m@rhEk}`C-b`^~ggkU~L|DD&bjv zn6iNS4>vDU64f>aI{9JBqcl*`QgbPiKTOF#JY9>PKm#{A%gIFpPN%Y9DhnE&Ix5#x zXB(6_ZRFEwZ&Q4=L#~D|D214Y_5a7&b%$3`eC?fklW@fVAx$6wVn88AFiH^w6bKrs zAYer>f{0!bq9Ejg6pv8>iBBUdDc8;pv#)Lgc9xohjA^{31|G zBH!U!z-a%Y2RY7L;?kxAfoabg6@vf)$GCZLsm{w70195+PJ(r*CRmq_C&jpQ2h2eU zDt#oc87H!6?n0O0^m)r1=U5LzFU@c-8H`K+idh6B+~d+W$z@y`xt1~v2jz)NzafWl z>B!5iOSL9Wzf?*!E)7rhmDXBVmwtxE;Kins%HW&A$O(WbV6;SMl<17Y$dB5@LrG+` zFv4j6hjEFDOPQ@*dMz?tmy!b(zs=2qOLd-pjfKahIy3H4zAB?6@*J)Oj21@bYLz$^ zoa3B{H)|kSlsZ7^$Hmk~70J9o<~JM+fn3Z^EmaqTkqttnvj~nEwI}hd>p&c@uEIb; zv*M0X;fruW7^+!4oK|1E)MwSqtIT)|s?;uJ2+C$) zWKDfuH5~6R+t#ISRXb<<%l0Mp2nb7kRXqdE6Xlk=)a~kWG=JVR;&Nw`dJe{x)VI~) z7=pcqaa!HxQg^AhVRM=xUiK+cPew&kcdK2vCnc8sz@_e0cP;go{e{%!Xy>W>)Dnz; z^Z^dCYCE?|KOMw56G8l14Os3Y9!KIX%+ON5Q){ADs<}(H8tfA5)VT|+&Z zZqBHuzNfeP(-p> zGJAv5^c+%-q?|sMBBNCP43hXXmwH6_f=ayX2nDQ(bm9D)$&p$Cjsc;5XlI@q*A}X!zce z`Rz#@*8?&y4(*xjIA3d6GydhD>Jp=&J1=#d10I?AV@c%kr_@QIzcFqll$_%duME|o z&n1++jYJ*^N}U$^_-Z@RGhVMNK=*??#SwmxE(?_($Ao*}^S`Zr1r1 zxEPzmCD$i%kXwKjx@`Nxp>wUlwM{cV?4E|Ue3@ojHHHoU(yG-9oc$O9VcJECrX!nI_V}%_R$1g4qfx1XS(J8O4@@# zz?RpwHX_PhmPqo&xY<}1U1Ke0=&deGR&uW?aTZQ@Gk9@^K0+3rBTIcK^dMTYpg8&O 
zk-%-|)Yi!z&`>GViZI^!-R0<<{9dUwlx{g<@fN6c0VyR$EM^*Aj~<;r^jjPJT&~&N0XD$In6Sy=|z}3rGQef14CoE;k~HusXH1!!w_ft} zFlz?WF^gR)4!Puo4w%IsEr3tIAy@*#HZpL$EZ&!byPz2^>$J?=&q>>f zV#QxKy2?lUmo!fLioe@;v7Pftxfbw=9%hRV{)uwN_=eWAD`^}^i~s%y5MEmT2+}xI z7ALFD<#tRn1@p^DD24bk zi5&2XTdD?=5{W~025Pv2CVpGH3MBfMK=&*ebZq&x*X@K zN(^|WwuXao)1HS8CL2eG;wfs`jJO!GIQf%Y>O3_Jw|X>7R}nLIRJj~g>NWH_tELvA zL56N33wwR>t?FfbO1Z*PJ>}7x+n~{wM7{)0fr;{3s56p>p@OI7Q1ia2 z1nxVfxDW15={ml*i9(!@Yi0?i@8{RwV;d^Dl-|NO`}hDXK~@1@zgME8B@r&<;6+Bw z02Wb=^y-dktTR(gD#fIdO)A->vP~-6q_RvZ%cL?*D$}HLO)6KXE~r5>+Km_PrR6pq z&5f+w3`*qV-stp)v5S>yl95o`|EN1zbB|EzEQV*#mJD)X`2z)u4#KzR2XOlbq#MLBbEdsGmmq+5n zICiG`n6D8O$oRwW^V-tSpm+gm*8f)wBD-1v3`5!9|{kboNg>E>ZQL+54u4T{q? zlXSbDp$T?9qYEi!J>!uFm7@=mr8ht`RT=s?S?*ab!|8u4R*pU@A`Cpyf>(c%!K`O2 za1o4f&w9q~CQHQ#DShCQ084Hl* zw7rxByAq`dV$^|(FSP7D!LswMla$*C1xg}IaV=o9|JKot^Rc+IhG5p6pMwC&uey0~ zr_Rgx8WgAP7bIABYJzoV5=$C)R-iRff=d6H*NpdAH1{Z%;q-^@Q_kxihTWQB5Oa(> zm#we}M!3hFmy^r5^DXukQIGP(or}p~+_`X>b*I+E>9?6Qx&vdF?z z<$50r8-p|CRIKd2&tct>UW-YFRQg^9cH<&(w z8i_|nkSYsPvsU}dUg1*rst@n=m#rXG7N$AOR#c4IB`DdAIa^FGGQOohzClwlr9W?7he z_!(nC7nv3_-EGJu^2x%~x_eYyy6%lluO-=Cm@+-M=^8ha3sXANypOo)GcJV-Q*ny9 z*))C6C39iQkT+peaGL%|in}nSDKb9gXJAfg7p6vUQcewr5pUuAB67&W)S%mx!?~Sp z`uRghl!d83(Fkc`DemM)UE;{l`xuL8Qd_e1A`)d`Y7xGy;Vs#EolCqRG#I_nTQdK7 z5@liP)ti-5^|Y^KGc5fW;+3H-XaET%yO1agQ%&zs z^3-hlfBtY1Wnt=3%;C(uM2M3~Q^gpry~Lq6kSGgN3vc3$%(#tSaPl7@RTiec z$4J~WUQszV`!+dbVQS8`2}Tr--(8H&;r}o%@|Zb;abauCWf?7MUw9e@M`LiK4CBIf z8rihGmPy}3-$S5X?y`+Ys($oPXhWBhO%|rw<2GVzPgG~@etdcfuQD`}LTPa3MOwGq)23jjueNOI({L=hQU&d|;-OIGq&v|+0S zC%GBCI75e!MHZ%x(UVIp$jQgc5!{8TztN(+*3vrWSGgRWlUJbo7_;k^BNl&{9I`O= zE*={^%||$PkwX@yt_MebDZjZK6O#Ai9#daRbM|^!m@338&Ff?Ia5aBvLuzv_g;w@vN-uFYN_+oNK7=0h8{6P z+qxW8Y8@sN-tsae^dgHaNo~`EwgzzWFCamdr23$Wp888)y@njJB(>w=#7dZIu5~${ zQ$sL!_BdQHwvt1Zq_#nd*ANEdE0^P4b-pV_J4rAOW22NMsfCzj*T*PyIrgdVV2Ap8 z3?YXsNln4r#`I>7E%PV49RH~O7*!01M~_Uk;wylNjpL6@kLj*mh2jM!%4=bS8FnQp z$E^C~m{p&vu<$kveKL|`R(*2ZRiC!|sRU^`_?jm9FuMKc=CzPKS>eeeohv*!d9Cn7 zT7QM77a6$1lhbvKcD+WF(if1JjC*`ek6+MnEVOqKXDAdweRZjLeIIAVT9_KOFg1Dw zSd%c9P%WCa-AG&}@BzPc5)@tpQ-TquMT82v%u-yGK2Fggf z@pgLL+wW2oY3cAb`JnmtZXUd?^Tc0`NvF5d^M(g{yO0d@c6wK(z3nYMgv5I&s>fT^ zRNfHd%4e#dK!vw8A@1$<(C31CQTmRic4&H`PTQ|o@E>dl-~dB9`Ct+U zdm| z{o-}x;7=A(TdLgYcF|gEopC&?b(;RYLh6xf4N=H9HdueQn|F`onR5)et4YvCzV}FM z+VTJ(#tL01_c7A6k!$`3ICIQTXgf*MMt)>@mE+802@#h2vrBur6w62_GHGb$9C&UM za%&sE_%|353KzMw*B(Z+e&I97pe?-flnQG?tyP9@H|HAzzt^NRfCzIEu~6H$M8}KQ z1CU3`+NUmPVm@-@s7`A!flXb+!-M#hfm8e{c(|??z{6j$kmsDLDUQQmaeGfj^KdnX zyQY6pYxiU{S+Q7@b*0VZ$!MLKaWhER1Sf@V!O3X8`PjTjWQ{JCKa@oJ1t+6d703zh zbRkY8QBFpGk%u2%u#CZW@+(~8-JurfA*_}aY?}XOa>&W(6VMA1if?y`4~6~>EdB#I zzG(>69uIwUOoHBWs<|okk(1HM7g*28d5Mt|JMacFZT21MH+bBX>DPGMv%NChT?*V6gu)7@@Y&ey76-USOC zrwKFQVbx?iBH=dIDb*x&HDkXccKmF{?g^yi? 
zE`}+7V;EaW&N+CZ4}bk4o|M4ZDwuQJ)8s72WFj0^{ZIg`t%F&|eo590EI)?J-{uBL zC@t?0oK3C*=M*fCgsWO&dI-)V55d`;oCP<6v!;@h=}=mJFz2|7$l38aR6a+I!@~$z z+XS&tBYx`i{Ml8U9fZ=33FaL4G&%R51&bc1<{LX5UXNApPU?4}j@wWE(=dXE@k($s13;$+ z@;l>l&^y%2>==F2bm#)s#(}IEJR)y4H{14SAvXJP8{6)SNOdGu;aFE@?!-4bq?7P2h1Lfj}h;rkl_;y{dcm8r5?s9@XhrL{- z-?R!>XM+0sAA~wf^6X6O5_bKEAfik)CFZ5~G7cR%LX2*|6vbrX7zvHOh7)$#98L?3 zJ{#RwWtqL=W}(s7vFl}LnSEjImuvcger^lH{l11H0Ahl>1Bw!gT@nZ zXi9w0I1%@|l=z^*aUdl=XmA`zi4Ph)ev;x28W+w3xS}9*-df#xx9QHi(&ne&X;l-P zwlBJQtt|7Qx~y~2=VV@tI;ONytbvrUoiaQNS?rW4?Q7YP&G;R+z}OCC;O#l(Wn0_~vL{11&PJQuK4HG^DOcgmx$XPO-gP9km-~A-A zrKb$utatjoGCm-YEj{IoeRL-}e?*axVPSVXM==(%+{2s&mL~{Hd!y7}gdyl#B6IO^?p=~{&+t#DHZT)2PlUoV_H-o zlr;_LQ4%>(OFk{x%&9^i8Qyldlbj(;k&^pw)eXoC^EZi{LnaTYX2-|)e+Zc7aHU#= z9+^C}DZsqeaK;vs$O&fh$WwU3LsL5hOer}>-wV#O{=~9q0!+VIB>r{*YCLMAZfYJ? z`>iICqhWICw|p!R-)VF7dz?g$hRNe6)GPTei5&2fqZ9Q&?tvYk7Qpz)lNWHD7f5dl z<|EK=Xe;fro3b%q`lGi6-wpd`$6N5_m2gci|+L3cR#+K|;qKj#{xK>WTek5K>WAyt;w>U5HLJ~P-We@m6 z-cH}KSGj2yV@l4l_dTyT? zZ}AU7LZ;i|^$F8Vcg((UJXVes1;OZ7YopK8MvvM2Ox)r%!D)N9n}=JxVRnsPZ`|EV z=9-m|-A1tnQo{ORKgsk#X8WC-ht~5!+5#}}7N2?YP1Rt44?HklA9NrmZ}FMEf8ns_ zQO)T$kVH20%u|c?NEg@J89S9k-kUQA9>@NJarY21s>sQEbLQX)7;X||jD4C!J}+mU zaVz@^#@$1}d_c}u@tl!4V))HIn0~*I$mWuH&fOTA6J!ifRTxk+N8jKnbGD=StKxeoCHMhheC7$uQgn89dagkx1nWD_$O#W(11*DZ-W z%?w6ya1}GYlE@p(V6-s8IeSTD4>K5z{)X5BtdfQ=dnN}1KT--m_&5oa^hJ)dEs-+O ziMB&-)lrt8DeymaH24Q0&QCEIBu%+&TxZA8?`U$OQIPpj0Yhd=dB_FFfD~Pf-1UhN z8O(DEYANAg z(K=-EZSUd9WzQmt!pWF+HtuO9FooLj{aR7Z?VBW93RCI|ei|-hzP5cO^+3EQ2>9%G zJFCrENFPZN+Pq8wkVlxKGZE)rjG-{4P?>TkwNC#EwSe0&(Ur*NkNpf2Q);49z+dM? zA4KASpA_AJTpj{81(-^(GEe134crqAs3i$GiXceMYFmp=j;rKnn`F!ui zl)GR$6U~_0h7P(LTzsdSfj16{zKqPfKn_o-L7_+;(^D2>?Ko2R2BJ;@-vLsL2P32rCac!}fiwd{sT_le09k$)8Ok8F#4czcS2FTQXG!HJGU zHoxK$z9JW02p?#n>Q~fe={Q-yK8g7T{ZgeS1w1eiZG;(kUKh*dR)1oZu>rUY5!-e`yB>4U|`0%Z2TsuUgxOQ(~ zv6%9-qsvaiKE-6_IRykS=!{GBCR{#aRdwI)I&(inB0APFQ73vI7}gwXFCMe`eM$q^Hj43)VXROG8@5~smqkfXNJcIPQf_nF}2|h@Wf?~Q#{ddF`dB0 zgB;Pe$mJ?yNRMVr!K_@snf7lbhDW5Uv-?6r>|^tFUHA)4k4$e)zB9{jLeK|;%?4q4 zKtk0fHVD}>8V1lpNs-tw$d9$+B@-76?w<&d1$aCF$WIFdYKO%=YGeVP3jp%d0)ff| z$O60^0OY3y0zD}}7T~P_AU`b-D74H}%>uk10OY3y0u2%%3-DO9Kz>>v&}0F!0RII5`DuYbPY93&Xdu1F+eQ4e zAfTD7mMz>Q0K`uV0&%V`slxCx6(6MB-G?2|`9(r>>RFsElCG9r7IJ3)lZj!cYB16d zG1v_{R>7$T#u2GZl5EUM+%B0lQaed~gD}g=#aA41s+$C}9wMu{I@JwgE+5$H2^;HV> zDGB>SLfq=*EH~U@$z=u>vTfBgejnS`=$CW)Za5I%KxN~`Ep~_m} zIIn=zTN+FQz6B9~O(un$MQSG~lM!>Gox#T?XamMn0vH!X7pk$yCSaO?+#=x<8mu)a z%_akEp;{`8)xwb9+(?}Y@*+jXcgYytgcx_NJog^Y9d)642XtZ;N%=V~3{J~3;@kwv zEW{x2Z(*D(3|n&y@mqb283XUxD!-++(Z1@2we* zJ7B`XE8(QGF)?j$SBUe#aZIn_tnRK5%iN3fb?V0>k+C7fcO(CTjKzvGvIh5r^x2hZ z&JuMiSUDvvnybhdYlgTpgI)Yp5%{)*D78rOX4-5@$lOr<9DyC(O#5t zgl3s(Y$zQjmw1DC920%8^IKzN$JdmGX?Db?Fz(KJ5g!R{G5MUYy55W#A!=E9jxo` z1>puTUgA6PW2JOQm*-GVfWXRvD$L)cxS#)5Zrv*)L(}Ocg6Z{vy=_*YQ`eAs&a=b%;6T z=`ar;6fF0z7Wej8EwW>e;ZlAO(0taiOy67RH)6N4ZZr9aRVSGiA@pXfMpt|Wsp;al z90@}sRP1Tw3y*n@i9#8^5j}hbU5ode)3;a9uM$UF!6Wmd$z+sg_qB-f)#y^@ZciB# zH)ZZlCZl=pxe@jW(tKIE6MxK}qFYp*eo)e_Nk{cFMT0u>8<`6g4-VXebdl6%xP)7@ zbsqwy;WXO$8N_<)*vp7b)3Ke1-HTX-x(p~ikQxT3H~29&8qwGpyqteQ9!({%8*P>46sPhUWRv?ykzPXillkhF* zqi+;sP&w1lG74D7C|97~nFRZ5Bk2_fECqdy+ycH65z9%8as?~e=oZu!5%Vy>6ejvy zyTbdAkvO>>F6&N%4|s8+B}k5HVA=Ik-jtcm9p~`kbWoh=1-SA>8wQRk^C-nv5=3K2 z@|>w5G6h*Yb{SoP_$MWn!Z!vgCwdPudBW7Nn_5I`k;%7p_yj~hLq1P^da%)i@F;bp zA_=QxkTYLBbt}%OT!kS-?zL}yVoZ86j2FM>{(zL3>Ab-L>E(7>@&>07qmZ0*=~O5D z=C7#L^=jnf5c}41V5|aVxf+Mu@D85c2v^n2S+2s67k-5+i4L>W6w9?Mb_TL%&P8MM(~5tI zEkX8-Q315nvqQ1T$S3yoz@~#NX2Ur4L&Uxne12N-Ke6=|+fPd!8^^v6*j(dC6U;n| 
z{eJ8VWIHPq+e-~PS$LPsec0loU!hzp3_;;5cq~6)cU}RxpYM?q^cZCP%Rq{D!1%1*1K!V9Zxw>@dS^1sC{QL8;pc z-ndPC^QjS?B4K!}$2UG&f^U2@uW#Bz3GExwH*s-hlA}f_ z&svVj9u7}CnXgvgfg{b+a3c`ExBARD;Wl_K?*Ab(t^c27fifnw|DUA)pF~po|7nl^ zpSc5!KY${mz_UL8Z;AW=cH{rCqQb=zhHK;g_tU)o_tO&m4<+b-iy4h$yZ-mny#Dvo z68sNr`rl$+X|cCx|NFCp{9k?pFzEB?Qg)HWc){Dh)8?Up)F$|C@tt^LLch%ufA##_ z_zf?=_V{fVQfBlnXMt9;tl!G%w+TpUzs>OYZDs`+Ye13F`fBOKw(4#THN*uNvE>uNvE<7(4Q zd(#WE%X>o(T~;CZw_7=LrJG(126nH~1_NN;axSX(6g15G17Fq|;kHn7iF;1VYF!0;Ix*n>zeS8sBC;0r1juG%rXd=2=v zATdP%uaz*oZ|1&$>_!=jR2}mF1Qtd&mb}}il9aUBj|th9Y&nM|jX9$Y*Fuf%_}BA6 zy^emx)-Jp)HW&#eOq8&BVB$`+Cz$!Q>w3(`=?6S8(WS zmgnZo)_p15=?x{8Vyz;jJvn|vO83i@_G6G7GFQx&(w=79h~y2bY%;ius8sSbAp}`c%~O2eWSV*?_o8T0#MW-ba2HC27Jz(gCAB5>Q2~}UO7m6sP*eAqz zp%kPb3lSgXs;sNz1-lz${*u$E!jziQqZfKj zi_M5*2MZ6iW}UO~Jum}OW|T6VTqRBKLCO@mD!m6?b(8Ka>AmQtmFfUsCL;V*!ha=< zpb!T$GotDB)aXLw@h7C=%ctOnqD)QE67rV167ncv5(lmH_Us-v=>aQUe}Zii$Ex&W zjNFS+gmNcII8DO22t9J$!D8xzAn=E#=}kSh@R+JF@;EpObYpuPu@Pu&#uiM$+yp3$ z>?V_LER#24&I-#LCZ_Pq)lM<_ zN%~85vSRl501&=odU&=@|BUp{OOR%}0-bIzT*@Wf$8kJ3T&(z#!*EsYj3w%V zP1p}c$Ew6w1VU^&;#k_9iR>E@-GS`JYt$ljI})b@0<@b!>$B8x^5$z=>;lANgKzUjb+*AQuY2QVX;&wwsb>at{BbWWHpHi}V~m_91Bat5Ty-x2fo6`-I`J{o-NN za|!ZevvDa~iOYc+0MW@=0?@(&d7a!}GDlcq zqLa&;K49ir%Y=Tbgy|=kxmJZ50S{@TK71BZpF={MPt5E(A4Apr!7u)!D6-&m5IFl2w zZ-nbF2{TG!ZIR8f^bwTd6l|E3IUDg!3|Qbgs~@V73PAD7U;bC3FGfTg3%O zEyS&{(Sw*nCT{UM(R^g`rzB?Tp*N9nE)tm+U1Kwu!YqR)+@oh8UN_HDm;!~zvZI$F z&co0Kh$&EbSU9>2ah^9f6kWzMyNTWpa&@9GFZp+n`CC@A3#3mcOnJT^I%%6CfKSnP z@it_dvg;EElG$1^?c;#uB>YR{8c=>>P081D7&jEi%t zxHI$wC?j?31;m!<*mlJ3)3FZ_+o5A$AojD4eTP`~a_mpmJ?uEY!(UGb@K+Lsk6NTc zVCqh$x1b)!2+LF}Oes0>8-bq|I7T#sGy++GR|0_iv_PN>1;_&I2mtca0)aLOkOlZ4 z0LV`Z0@5d5Y+-9euYLTqAP}egyvMLw1hYrPze6RYJB~p9`ExUI37v76{ZG3oC3O7NC6qke?O^G*5smKv4jYpB4!8 zx&T>#ZUI1kS|Cs+T+6CifZhQ>ep(QaZfUl#Z699O`)NTSPI)!92RWdQ6E&7d$d-02 z^5?Lny&^hWrptsWyG6XE`DsDo%9}zOD{ld&1OWMIfk5X8kOi0-0OY3y0ck7R!nW?- zYWZnFAWr$nC$MD)YgvRlJytvo`FGKZjlsmqTc+j0l+`S5ML#V_oYvVEwvxOcep(QS zGxIFyWlO*6>N~v@*=kt%)1s{9*eD#Cp0VCf3zAcwgj*JCZ2=w(0P@oUfrbc>1$Z(5 z$WIFdS}8yl;Q0U`KP?DI`@t5r7WCT3PYVLUWaDq>!{1qXrXk+hM9x!q;d>x>R zYuxjLG1PVmX%uRcax*xb=))+?myVmI9M9SK1^R6KN_9IJ^h(o8wEpRhzMR*eU7@&9Fc$VF5JN zOGa9|ZAdM^!805y`8J8i_w!eUoZA#u$elIFjyb76>g*+I!!s}u#1w&)`cqQuI>ckM zaj8_zuZNMDy-3A6ARcRj%Q8h{)FAta%VN)JzP~XatwToW6BSEYq_x7M>uUh!j|D14 z!5bxnq%4T%=T>jF?Z?*xU9r~vrN;snf9!-V;K|~(Z6wd58gFA+LfM` zGGFa~0sBPL&~l|?-+a)t+ecup_=4g1TkL}2k-EfbtL=i|k$OS!QKT1R1&fa%T4(J1 z&ALu?!S6uBN9nV~ZEqzPgTp|uONRF&n@fh3$aa?uR{#P& zehoA(8SXW-geAk@LF1BP?+RTm=36qXVYa(u*ylNO^Z4;cQ{_J;^e!3tX+fjx64kQS zJU*x4m#5)lyo)8nkdwGrvJ6tWSkmYxBQ;$@cd_IHQOgoqSq9HyiJumvR{0+Q;$lfV z0XWtIc^6BDOXgThOk6CfQWH=&4iQmdm@OgeIT-n|zPRYcH9tr8X`Yn`IBqwP#(=rf zSZRs67a3d=sHKsf@vI5lCo;B3=svla4(*Hw8|4E=zel~7smKpm6*)#Wm*H0^+82bv@GP2teE!>i3WZ%u^>6A}} z0Is+U6LP792}^b5tA*=wp)Rys-sO}k)egOrCR-uo0}`SpRq8ij(0pqpbDM+-rhL># z^q4XO;56l2F`3_#H3_DyQggshQ(7e&%TNJjX40g8TKSUiHL0KGn$#&@4_Mx_EnFA3 zxUuVhv}=`m1#GOXC7&y7b}`8;{@C0ZLUpTa_O-^mh%S$%7w&6i9mSO1*UCC-9HZRf z$U4g1*UIV;-`C3O$hh9u$~w;MYn_ZDye*$5;pq~dgOE3A*-vx#wdSi6Ud7Myt_9BV zeR07)KsOWS-zHMC9@YrEcA@(oQv0rdmtum3m6$Q&$BXWx>#1*>{v(45*UKPrm;9KO>TMPR|&w~=$ z^-k~k0Zj0o5#hrKG%`?l%i9kjyylR?Yfmu7c~ine2>;X8oIs-&DOUXsNb^_!ulgE5 z^+8)5hqI5;rCm(NjGQvNS(4B}>Do!t|!OpDs^rG^P5FA>eRsdknU2p^e z1C2VYvwI?PhtMR-w+h}nQZA#bF;7;vDs`J+TiVBd>=|rrHrJL+6m!bm6jPzHUom>u ziQc~p_MIYcql7-a^KSC%eX9UY5Wr#JVBZ*kG!5~P06iTHIJX|)J`Gs@rr-@f(HP(f z3E8~Y3$~?=5t^&4HiXT!B@+$NVFL{@lfHXRirWE}wvJ#drTUikUZ4n@YfC1In5IRT z(=H3u#MjVU@Tiz4!+5y6so1^XTBP`V`6Xf%>H$dBk2o}BzC7a4vAB8o+DxB&La4za 
z$2mqYizE!~m$x@yO0!LB(p!q_C!`9M3w0=_4A~;4O8qOmX9~_}i?d!@J-?F^_xqou8K#Z;+I$Yd3J3r>HFmuR$M zC}XQ=H2EJiu|})(A}$cbwSshiFk-C-(QCBn@0gvL>VtiNkB!z}OnB-hyE8LO$G9`| zu#RzO<~<$b&deV=#+{km%`Cy4nNtueU$Wio%zQ1tM+mUpnHfu_OW!chNtUTjn7um> zep=uI$wLBU0e%kv^3wu={t_Sy;E38@jr_Dgpnj-7*AFc~Bml@y3j)%sb+)iIqE{O~ zEeHfZTYJmw%ybhq=6Kw}otXu6$M2%EW$GpEphAEkOSOAU`b-Xo&z> zfMEeZep(>VI|5_@&IthW(*l8VVMzMb0+a>-`DuYb;|0h9ToM4}rv(DtFF+QcEC9$) z3j)$D%@(%p!|QrKEeOOZKk03=GqXa}cuGRMGjk7H+Snb&ik9hCVe)Qa_-R4n$~Oy; z1y~yZ|P(>mM2R+1OQPYVJuJ2STQAFjT3XXebEMp?`8p>TM29{jW*Ipwzt zkOlZG0LV`Z1o}#VEWrN)fc&&Tpd#Ev=?4q&V*rq!76hdIU<+FddhO$<1%Y5a?HlOB zS37boBD)<^dMzTm-4Bd%Eh4*}yB3jsTzoAeTmMKvuSH}ZZ`LASLlLe;?3C~$3HKok zUW<6*Bdm$f!#bMGM0WNylh*@ivU=TDuV>$JudGF6k6?kXnbew%pk~M4XV)UK&opZh z)mV#o{ayUL33BT&naLg#y9v?Q5?-#sWwoltbpi^OZARo4^*FBKs!CalD8gGivpf49 zKJ{9}dnXzF_DN`;Lw#!zNxO{jQUa9*3jbey;h1msm4Y=acsbsD0KG1|AJ2t8)n7)Q zPj!?|d(Vi?!@K{orv{20@IbwS73u{@!(uUTU#=5_KG8(zS-+1&=7wW>KxRDS`~pMSgW#LMyuHp25D8Hss*R?kT`iK8=Q_3 z2IKq)98S_Ji8G^0y@NDsxKm1fY)dEBa7Vow4yj=c%hTUCiy_&k80oDf%vw z7D&SmSN2K1_GlJEdV^%uHH#rDfo=}~UL_(f7U)U!L_97c$__1}x}Jzr9*&FnKtwzy z&|B(>Xz_um#g0RZz~-H=7SkSeMc@bjK%nJ|z?OYdl^TLHEq_pCIlYYq`52_1sLV1{ zDN&IXUmweQTw8vp;5CwR!z3j3M!~kUEI+nh1F*TaWTKccM$FHE;;(5g5{QXbjO*Yo z9~#qC>WT$AxVD~Yo&vf!xI!%i0!>pVrnv-Znr1{FW0Fx4`b?9DGv$doFMm>-=6k`r zM9Ni5$OiwCU|ZT1er#EodEsQLF2r?h(|}cN>%J z@S+wWllAH(Xgz{a)2jWtc7Mj`x=v7sfkO;kr4|b6&w}=EFzVcTs5c4fpMqNBMcpl^ zVe$Fl68h}Aw;t*z8r9r4)9&=>*%7UYwNDeIX2FQPQ6|w-?Ve@rd5ZQ^(;lm(@%Uy!A$Ku&uD8slw#fS0Jp$XgLb%I*yMU4vTenC4Bj9Oa{b(5feBB*s<)LKFP zSUyYS1$Bv_?(m{c6x1gK ztu`3dS?AYvvqm-Tu?SRG&n<$uRgks^BbL@fJg5=T9`}BMnm!4y{c6?>Z~DuGy=vJW zd4lUB#41$YUfdDER|zP4f~(E*Fkyx20g^t!B_u9wFE^x@KQpa8Z@nqS@%8{n6>2>S zX|sbO)iJHA1{sNcXXOjhzMmBxUz9MufQ$WK;L&4m3hjN%W7}+nY6WE|vMw$XyMoAM zxxK>wKYux+Ro>lxtv-cTto>9$9R?29FJ}qrPlEPWFzVcTsO?`Qs+@W9BHkm2Nx$_0 z1J(g0^tH&n^$_Q4L@4?>5O`KCZG-Uz&#LVuu|oX=63?oY3W;ac?tclS@;0BF-S_83 z+}je`^J-^p)c_}hjYs5a1b}{y8Pd5RQEZ(@>k3yS#g`Ww{!8seb6^$Rzmjvf3F!0y$$%W)N`W1QBPcFb8HF_@JMJ zF8F={w+xxW;DYNuW^-+EQsa~REn1EfJsytdcj(O7bp1+@DNP^7VdTZ6*mKQiBhD|3 z87}?$7R{A%4k?yv7UCDqvs~tkG_kTzC&hBDMVw#0F)y>#Bk|f zRBEowb4amVKOoNU2zs@D_d=XMm_Ul<%K8>(3Vqs>D`hY#ma7Ni{e0S!D|-Mbmg_>q z`OO%wUgXL=l@!ah2yuQi(5qLo=U~+tq*$)|5a$O1y%+rx!ysX9|QDqjmrjC z1u2&655)O7N-tNtrQkY^6wB56JKXDiSPL!%*BnwT*QtoF#z<(e^sqODdL@r1#d2MS z_~Sk<>XjTN#d1|4&d;QIv8Y#aA5tvWhz1Tz_5wu2G~2*L-!@ zFW7XLj&*jKMlEnmA3TUAA3x>3{%sD`VLX3_JDE95?d;|1Sais6XOpf$x;<14pKQ|A zNKb)M;Zsby68j&MzBjKBsA0KIzPAS8AnQud`R;)CM3=Aa)Gp!O0p(wSjYDvQzxz1+ z%a$~)1|aR4tkLV-Fe)jc*)BlO1g7a%NCgvi71 z4N}faEOrKpoe!>IpoLG0mFZ#^zxd)S zp;|8Ai!aJ=1KP!qZ0X7Wm{uG;p=$4Jqrg%}Wf#zOdMsxkp03WyKDd!-beYas^4zC(8GbBp6=Q2i5d9sZOMn}}0war_L)V-s+)e#TF6z}r>I7h9YWu^q(OW^w!s$)mkE6>2(wIIXSW(0BF{ z)e~t>3il%I&Z^8~R@wvpiQpCQ%0zd%h`(CG_(Zoty&@8Zp?LW0Dz!-@ED+ubTPATb zJGY*+?U2UFZ1aPnvH<=ifGY&(wqU@zdVmGH3}93Mi)xI9gOR2Udi`gdeVT+mdz6Ab zacX&`06r~%l>)d%07nSW`N4p-^#GsNfMyx3PQbnqu<-&iDHts8LBDoM@3FdAC^_O+ z^zqBlpk?+v?+i0d7y<@8O(27M9x43;jpl{9NH=_6p7)yTg_0PO<_8+hJ8)8#6zx&@o03Ymg{sl_c)>f@^7K36G1b9|LTz zEt%-6-?a#*pbCaCznbZaTBaanenY5j;6%?t=9$PevpAhOW%x=|?`ZvYk?1tgyZPwF z@Kp07HGRH1`Zv5e^C~Pj%Ump38C!Jxz&j_q7%B5c+jT7P0F!*U4zJ_G_3udDs3we` zG#hMu~5E#xeq+ZBPLhYVN}lpt7t0T~etRtomSws40Aa|&KT zl}%gd3#glDY5ihGU0ZaV+mQPNe3>Rb94O%~l#kD^8+~kjxtaxvnAE;pO|x8#6glPW;u0Ii6Q1=EPr}m?N)Fyc0#J!A1!ml<;YULC3nLeh0z@P%TGZov1C6@O$5% zV7sN$FcOJ}x9r+l;6L~_ zAK-=oDBMZK92hnB3o>p|ha<{g&Q+)ffp|Jnm6X;eHjz9R@^TI?tJQQ|2kpU$uX%{v zqAGC3%lzb&+YXiqe3@Tb$atf*gh9*8xY5t~+X#2iS#nMb6u!N_@CtQ2%I&!e^G}uw zRv;`2NEoDmY+h-MDpd+hjx8lpVwf$NIBVGp_QbJe>?a`(tQG2SsZH9W(CYNS+H3)D 
zpef5(Ca@-blT1y{kT9qwvJY2HkfAvL0>`r|Pe_sHZPCQ4>;QXWReFD_t5T(|1Oh)! z@tTy}DPg#?@_e1*R`ojCgQa~G=PADz<#}-FJIVV+!W{S2bO*pe7dnqY=ipb<6@g@H zYfh%!2L0^Gv?l)=5xtU)eNK}wF5(PuP(r zvgS$XznKv=aY^zE8n*nf{|w$zDfhC~xl@(eE7+E{!jG+|JT}*sOcc{%pB8fr#L)Vd zX@?c#T7M!qXqvpo4{7~jASGIV8qnQiW6w%e=4ym0bqCTk&0{Hj*&CcK68cP2L(~M* zWbLQU6{<0YW7>Kz0nv!-1n^}6+7S$x_JkjBj0QAqbC(qQQbM%f@(K}T^I9}88s~JV zQsY5mEiCOvzfkQ2n`=uZR%nBkucx*z2(-O9<-pIchzLu2Q+QlMX%RNpmP`~;r$v}q zg?>B7Le(uv>0=MoG9}u3j;U4muei&>*VtT%7)}8wW9HtlTjo)oU5?6nhI|tR^zYF& z31*F*Zh4ODECn7dwnK``F2`pg#(B~eQ>9)M-d%#T$KoZ9OGPM?Sl=VQHf`j@kWs$W zi`pES)L_4$9SBCPtcN;9P;U^_S}*EkLH$k8{tHH}tB3lQpnf5!c~5yPuvJhaXB+RN zN$5IX_B&A~(E^44GZyF`HWsM%qUIoz7Wi4v{tZUmQ4e*Ipe_?s=V_0wR|skYX-Q2a z^yylJGKsqG5Y#UPwbYCHvY=)PT5d3EWj)m9-_Qc)o?YujY>t-C+8-%M1;L1Q^$^Eu zL>w)e+Q9T3+h@{78SktDgXueOBUYhmAc;d%-ZQS5jK9i|k!XVs-%8*4MzpE3T6y}; z%c6OW6qml!GTDgP8y8ch{uAD;bB%5-CG>eC?ODGj)1V2**C~S98ywQ{tJImur0tFt zw2r~3W%W>Z3hJwZTJ1&MEU29Xtw%8Gj(VsazcaeFPB9j6ws>?^$fU0Q1Z{9IYHyTD z^u-cEy-HB$dQs07)VAjtyB;T@&#pD~Q1=V!>w>!1i+Z1+b`!MT!Ki7^`E@<+dt-qk z(InZ_dV|CDRv4MIz^Q^ZBp9`<9%{KpHGQYri+H9W4i}{Jf)RJrLwsH%qVK3w=!CmV z?})KqR$~+X1_pC1@5Te{JK>1wV@@p|M&!k6b)Xof<{~%YZnPH}iC#SV2kBA$MAASB z<4civoN56vG*RC3ro42iGGx=7!;tNobGYz6E%#)!CyVN#8t?{DX zBdD(m+Rk9qz4cI^5Y*hmjIL=fcy#?&P(K#5eZi=`Q6|xBnRQ0jD+RU8i#igSwAS~6 z_G>U|bv@KFLA?P0JlD1Z9P}#BwY@=Fg}MhMp0rDQ(U5r3uKkai^dd++X;&&F9`M^_ zNS}bjlXf*i;z_&lKcQ54QbVOr+BIz07skgLl!W%A-B+6H7~%R^sErRd+?K21&su|1 z!A3F8OGY`0>0n5=g2bj$1d`iSW+9V?su8#+CG?FFr6`kV{%Hh-HGUM-N-yd=g8G7> zy&jBOTMzX)K`lAl=vwDREkGu9eOJ)-1f%9{_3IirXmlM50DV{y9M-94kXE5)fkday z6%w7g$dFcnM5opWiB5f%Bs~-E(x@+;=B$y>we0(%t!3yV46bGMblB$F;)zCX{tFey z@?B9A2f;ruGgcbV~i3bET8%gKiMw}}3 zD~DG&^D!y)oGqO=M)d}JqD|KPPMef>0UjR(lg>AmNR=@2Bzr*cN^nr~GD-~oGEz0s z0}lwkDC+Leg>n81X{y^p#PpNU^`@CGIWPOw{X?r;p)wmw?*m6>57b`(hX~M_!GNXp z0MGn`^{-Il1+Y>8OOU399}CbI!GN{(03XzVR;&@-Ucwe)?vHx1U zuQeW$vTN`isNbGzcMX`ru0hRt*fmILs+>Pdup+DXFyxcF?T~pgs3udh>#c;`6G^dK z2}2Mc<>TUcw%neiSgsnx%Y9tjaLDaLirxgs?L&3V7n*X5xec^ZRceTpYL$f55^h51 zS?(W>N=4hJE2kwzV4ETLNT&2QL++6s7^Q)8kE8_sg{EA+wP3!`lzYrbGzk6c@!Ucq zH-aMEW=NB;nS}WWJ#yVQ*-ae*!fFW5m2C!Xt%Plc$8(fZ318-k50_^fUv7ba;=Y{x zpWS8%E0);6v~C<>y{%A(^cuzQ4JRvpvhq6f!QmA344B(HVeC2RFAO(O=HsG!XD?P$ z*W>ORQP0CjT&b^8Z8Mbq-G3iZWuSz3D$pOY^ydftTpu4X0?PYnLp#tZ0Iy#HV39@$ zVy@?1+kRRQs#E@w=ytXslm;OAX+cO!(6N=nFKs%nLcfMee_nH$;%APFkR#vJtW^BA zaVau9AJVK+V^Ee~(!5BdUoIhW+)ruxiQZ3X`e~j|X;!IgfX?$}a|QJ_i=L|VCJm0n zZ3GLyplL~)ElXqXM`^0m0r0W&J}mTCEMJ<6wLrc;JL4zjc2C?3>Xe011I)JY9t+Y` z#d=#1KQZ@YFUS)0Xtr{OgSnFCPFJxj5RYAq%Ps0^T>0V43Uv~g9z|*gm~u;E?~vye zUY^5cwfY3tL1@Hf|3l;!^$V`tHp9&vG8-X1dcn3G2@?W^ndm z*N@18k8c`UW1GG9}7(0_IfbGBr6WnA;h{@sLsty`=|y-aQkSRZj91Z!b7wV%htiN zdD}-lV%+i2AqE{-lO0-zC2ADXw1@L%y^0h`R=gt9q#}2rkZz~oNN8x)EaJ*5|4jJS zik=ThnA<7doGtBMTNk%@g=*8>7;FdS_+PVOUgzMv1&Imow)_%G+D)^u0HC6O2zYXa zv9DJFOA84tu78DECkhln${`det*5|_Bv*k7^*Fs6JXAkNNuAVrcD2l+{Z+@aXx z%RohsFCQ;}CuSK>Un?OsECG$Ss;&>dqaJvV2yLYwOiqfdCX46P_m!x0%(t!~E%)^?dqZOy_7pd_BYB5iaw zQpqOu|1Nl^n%WCL&eq)3zO~xUSiqOq%}|}vFwFO%Yg6Z>9Q9kJ=)SOyG|W>EFLZn zSI_sv;p>yA^PG0gTfiNsJOR_1oZ%HskkM~+8x+%3b=qx*s0PE*P`4jZP^m!z^WV|7 zkf8k1-pw#5*LTM&mrzcr;d9dQ;<0G!BdrD1@MA&jdD(7w!`L)5yae9pIWfK1<|GZj z<_P3m-aBG*BEwsvarc}wthLQa8~)4T$eH{m>SJ*7n%w|<^*lzsfI2kl4e4Rkxo<1w zoQnE%$zi3Of)kK#b|Pf=9H;JSum9FKNBHWPG{P^)NL64lrdSYgIqg zscU1kA2l?2X=~NhS0FE4HHY#h4?m5cJZ`GfHA~Geuz6_lk6Z%2=4$9EHV?lQZvbt( z=BkcvSx4QiPC%)yd1^A6mH~7^cdJuTtZQ4f0ewg^lkQgMp|N#sr*6%6Gb4Aaci`Nv z$EY7&IZoQ$>R57iP}hMb#qs-hny>5e>Ni+YGNI--@JrXuYIz%*S+iEXPBWgU(&1^t zIPh+@28pgGsejP)ORc@JFiz4mX$eOqv}z#zxxd+@}`b=x?ZlL7$QtwXgxlf 
[GIT binary patch payload: base85-encoded binary data, not human-editable]
zpCU>?fg~slF@ek^Q!<3Qq*@YYfw{;OWJ(eo1KSM2(K`tH;)J{adMQlJ#P1LOv}`Tu z93N9W?ATh6?maE>ynx;}rq{ku+!*VE5KT(TlL?YbAGcT|gPr5^0|1@5`2ptBT!2;& zh)J`tKz6!3%MT#8<_r@2fKKg3egG9_^8-lC;s=lh&VXzOdmpP$u=U9%Ajwl}WF0Qd zi5|IDp={@YO&oHv5vV7_l$R-<`U0vCte;VfppRDP5vw=wHDPaycd_clq{EIFrnfUI zYNpSo*|7gQ`P|v(E!1gKVVpj@wt6bfn32VR3p8caAkIb@UCYqnU{*48IAqi~#hxj& zsVBDGMK!6s5X{fOdkria5dialp+*$EBAF<(P--s_%Xz9;XyMO;I>9sP=94$#sfaYf ziJjrlT}~e#Jr^Fbb2tz{OlmI7tU17!L32pD9C1sNNJXa zL^tm(CcgQ5&{;S=D&I$a2NvuP1Mw6`DF46HXt8?fcIJa}(<3|sRCZ4$0?fa-%9ns2Z$h#4K1IYWWhK{^` z>IV%S(W(vVv^JQIe(e+ZijL76p{ap320w)26M0}d8V!TBXuCSJb%Q!(EAt)>%hIS= zozyg54QFS+AJF{#i|}8GPTpKwt!(Q6ig#XD=xIOo7ucPK>YrkF%J-kp?le@_=D|`s z`IUyr=IcLUBB^em|HNjq;OjRV+SS>REG*&-J{w4hZ#Ia+aAPWJiFSq8$K@fZf=+mt zgwQIMA?2&gsMH&9vVnayJ`WJTmx1E<(*HPlg%|-J>FmOf=rmtC^9HWN`!$Ue1~GdY zwZYkCpg6ni1ZNkB>crP26zlz+#M*vm^`vM%Z)t3b7M_G&JTZXXy<<*0?B;)y3O_}` z(zWnw*%_i(cI;X>?}X|XVUfCWC+@3Tkuy}!44}SNKf}9q5L#^t>DICYLmMb=GdscH z5N=({pN49EE_Q*uLN+cK1!EQ>^+ z6};TMm>xvWhV6L1bulJ;9o&+-su2~tDm&0$)QHS26l7($k2YOyybPp>vk*8`F9**rJ7+C562RJIalF4_J;`S!&=TyH$f0r6YYj&E9H9LB`f%d ztl(X;g5L=%k^aQu&h?Yf+3(4!KO$>+*8mRU=X2W2;XDzlQb2$YU^Un4)g0S>tBD%1 zn((08uGMUDtNH7cS38%!q6mCLpk_vKYFLj$-9N1hNmsZ(W69no6g?pT&(A|bFntM#d-sa1w~6u z)rsiZE))aLfc^2QC+6*Z5s;!9NPPN25OK3#C&={$AXg|P@Vtw95ZGvEb&m*bXK^A2 zaUy4NBL4ve#+AT`cwaKNV?0t`g)fUzD}ZNE@^O@hNe)UvfZv}VE}u)cu{U7Gqtumi zJIe9$oNo8}hR$dgy#7hQ-q6tbbGXJ_0=PSvvi-N<;A{A zHkGSe8`-U9bgGx?OQptZ7&5A(O&ub!4aKfMQ_ud~D$l>I2fYas5Nv2V6VM9q_Qk02yA5q{2uQ2vQl>+Hgh64b zT0l_AH>kQ4sEcOe`LDV}P=EaJLOoU*u9fZs6924oe@Ve|h}FXqMdyA==U%DZF2xQ~ zsaG1Bqhe?T?*oDSG4^^Y$8?dR`?1OLZ|Ty73c9*D?Omh=s*VNvkk0PSr-fzFz9@VL zJRpqu3+#Ge!(z61yxHlci6S_i*tsD=%;WU{D53feJdI-G6>Fxlg}=4x$^LMn{T*!j z#EuAU=V~uh?@}WQL!=>ab`z5yZRJF-R%2JgV}i%kH2>4dJXP*VzCYO?@juucX9t=m zZLnmD(xXj3+L9RDLn&DJa8lumrqsX0we@=dUFv$oF?kOD(YvZm^-a-*#xd*;QupE* zwksUN2m|RDwyQXXMZ=yWbx&Wg4I9*%#*eJa>?mve$aaMvSu@IZs{N!ai=WEk!b{|C z4fMUVb{Ku%AK{`l15k5=?yAOdy}u4>6T2_OLhcHuGj2ugbjBRwbcRh!;(uGdrd`?h z$QpbPdUVY42B0PFtcLFoYZmX%Jj-nB+nC6`K&FK1Uizv@4uCqm|2ZIgC2JDyL2i$J zduuG$cw>G5>)qdR^T7>RmDI=^AWAx~fG%|303lkuX>C}{lD$xJ*h?|LxuQ1wk?@a9 zANaDLez&C4_wV;MeGkOKsSO9Q9;T;WG5PllYQx^PUl~4+@Norw2^(YjJocu~V?KQz zd(-Dpo(E2TH!VwWZk9^iX zkNL6BV;_#x_WZ!0J>1jI%UGLwC>(^N$0FIo|E70&FQ8#^sOzzZd#N8g4iykIRLyxE zNPS88#@CI%VlVYD_{L~8KED?5^A%88S4S=X(w>@}?gw5q)A!dD(C+x38Mnx&bc6nX zE&qzMq&(q~FZ$btwa>i)cK1ry;#I2RRxK*#nk;$UR9a&EG>y9@l=14c^l~+{buCP5 zPU-+tyvQ8UCLl=qZt5edeK*Mz??V=DNgi0!NSrb4xrm)HnIN>q8t}9;J5@~+$dj6I z0OzwW`tr(~rZ3_%`htNBwpqHGyJq_u$RK?G-mQXUuE4zZ&1kx>J(6geH7v!n4v31P zDQZrqp4!2j+P6&052S*L)$p`~zpB6)0TdUK)f}wefnj{8eu^a>tlDHrHxMiVw5W-( z&>_yt`SJ5gM-M>WGy%}hIhS$Xob?{0LDs_?)Jf^}Y8XnMfWISJABNmwnmP`0ipw+{ z%UuoN*alVIzU55lNK0Nj#?)~EBLci@1Q~W9T|Yn=T^BB+X`kQdf2}TNG#+1S>pyRVB=zJ z??C-S*5O41HUjmeUZtm`h!7i1G>8V~aBj}*9B_Km&$QpZ7);Z83Jw}fl*a(37Ee(zep&Zy_c!7<{(l=hFZP!lx3TDQq-=<0Zn*7X`M6;{ zUs3b4hxssF3jJ)uS~ak3y{W*~>A&Vm^S_(t+jQ$}A=+Q3Nry4l7d2vfDI2{V=>6vn z9qQ{uxeM?E-n0XGF)mXJa(}n6Q|KM+tX3&QjQ>Of$tvt5Oxv7+X4=3Pgz94Kqd@&r z_R-(4j{NNy*`%qj%@S>G17 z(P+ws?nR8|fo|y%ZQ(>L&|jen3R53}@?o=D;cn5nc1tDQXhgnw>$QXh`<6}U6WOiV zPWKwXq$;asRMgf~%`o<2v3?hNAz#r6ol-*GD@v_Ot(2m+RDaYeR^+X6U-T1ItgcEv z`kGQ~mE4+A=0gQg1hR;Jg7!<)f5tq9>PWF^*EzJNl!`867mr7)g@jVow~NOYYZtF8 z%yc70ld+~0Ika6o5{;&mN!l)6cT+r^rJKfNUuR(J>|3lUMKUJJDnl__ncB|`voz$; zBiDv9cuiWVVfGShLsh(#N%$J;cGwZ4*=DCRqabD}MNuk4DT>xIl%kkcXX293HzIGQ zz7Z8t-}u1Z22}kn1z~jvdv`kh?jmB`86;bFB?WUn9ez*l+<(s44QC$)K6zit!vZxq zt@Z?~oTqN?SBTeV=h2Gg=6qYsf}=e($O6H>3zk{=O2KBaw8 z@%0uu_2$858~#g>oDhF#-`qD5aTXsgQe>fApoYxol_0VM^;$id?PgU125$)@$OURiBZ6_w#p^Omt$B^^ 
zmUjmFS%Pp|%nD@@qJANa*K~DcYjhH-y0;$E!F=^YMKMc(YFW(sTM!^GU;PO3dm$<->--9oVv$(tqr~4DkS$NO^`);EFo|^b;yj^Yzz-CtOVHWg zqC6~+uwMwV!&qSI%g|o96jZ__R!@Z@3rLs$9F(EUN9ivA;DTt-v*_RV@~C<&P}kS9 znQt{xwbWnl7VHj$x&jHM``=p!p+iRxQ;+l}Qk_XKVUv~`9HY>-TtJhcdRCdPtLTNn|LdS=#uaNC5; zuycUF;=n7ItFAt2LPR-(w3xsSw( zb;(_1Ls=I^2nV>zZV5rL4)`U282XzD#Lz=B49ynM>?!=hFyR+IC;Y-N68N7dzfeqm z;nSVr!Gop&VqF$8;Ga=qYT*ZSLdONwvr@kdpxF{3h8p4gK374SLBR$x-r5j#S5>X3 zDr%X(V)_a+3?(v!G61aaK%@sfvnLHkv)_XSgMymS$bs28o13a?Key%`TERO&=1Reh z5j<(G0gcvdbEBQ{jBIVA3@SkxVBBc?HaDsz-rG4+q18{q&Iwcf@OwwW4*$DUxF2~g z2Y&D&KMwykL4K@%4EYU+&xHKQk`4KhXdyoyt-J`h!+T@&jNeAwT9M7a5y{4Yrbq1fq`Z>`^2E>$lQrGEt^WAFM6l z2N#Zz(~lVh&amTjDP71h%4bM@f2>AN>&m)%QLWpxS+Ez3!0*5Kwi8bmGJEGmX*jJj=#Y*mhpU z+*<1*nPBN+=!%1p$x~vzS=M=1>ROD`C_i~uX9-8{yTjr0%qiy9tPD79^3Iu3MR^_G zk}54Gc%oTg^xWhmx(uRF^o)4aPpA*HT})1bp0J`>$zpVTM!o<+f=(4d0un7biAMq# z`u#zz=aZB8V#!H7T5=MZEIEk^V{#HXGQ)B)Nt2VvXgxCV7=vuq3R!64a;d=9nG7VH zq%bi}8FB=qWXKWFPoHwcL}$2kCKk$DXzz|Kvv{~_3{}OqpyhPqqy6W60QgR-KAOLuewMr97c4zSYi}QoYb&0@Q*T$$XDz4WIGPd)1QvbE|UI zVlW36gDY-3gsIEK9Ows0(^o8v{X;5v^XZY(YuDaTK2DvP9yW3%!$KV+|YkmrulXJ7wiS34aiq{na3B`CK#nYBi(-^#D(N9~RPqdL≫S2Y&*3zhz@`jTI zsT7HL_hgnTCM^Fvso%3{V&Bn=)?;5?Ks&jPUPKbVT0qmJZ}p<~6Y0-CjkiDbBA$Au zE^46x+*@G{d;;6xFxZ5@O>jnOQdc(=;V*TTy(w-18d?GeGZ>oH6D3`!HePgm-MBdy z;$EF)a6u?g{|aheuU*C=y@ecbBY@p7E}(>6e|Xp~BPR|=HBE{Yyj$P-n~C9315t7b z3=9Lc1Q%8DxePYoye#PZ(ELTv|FPCNEpU8$43@ur)e~VMo^ONeVqbOXP9?bIiWa0V zp|QTW<*EeF->arqB5%3si<_-F@yuJU_{;`=0abE8^)HmiGu@r~s(--7u8+DD*D&=( z(6$n}WQJY*HI#_GG2V83g@Gt_j7zaj@>1lzu*bXxshfH%4uIR)A#Gf|*Uwa3`kGXKP zdIypB^VO?@WSZefm9Jib0|ty^dY2rbBjGSc&ah9LX1=ci1`9y88+lh@sQwVLsnv@K z82%8ZMG7P>onwpqv^ROJ6?I(Tm!mWi-=m`b5^7V67I0%GbnirO5!iRHOD2NDJW65BK_9| z2*dd$Tf@NgcbBhi40&(2xYL~T88fCQ@^RzMuE(8Fe!^!rObO z;c=$-Plq&j-Voue^iucK@c=ypk_DZwlS{dPe|y2jfw#3?4s_oOhV{!&ZWHHCQM6qYMd9(^I7pr!)piK2EJ9_mA>OusNd>dig z`)Z0st=sC6sUe46*tr2bmawEW&iwQ6CfgU$m_G$UBsD>x1?>6j`^=8cPq z*twJXs+Bl7jwT0LuuU|10S-aa#p=>Um8j}6xCR%fH*tV=Qb%tOz0a^8UG2|17Njq{ z9T&(E2L65_Lc5~D4Y>PO?>F)f48ZUv_8z$fP%f@2BA@#gG~u4Kh3w{a(#;p7n-6Ul zU$VI^^%L~7E?S}{345}7F@5HEk1w|^ch8;q^h(eNwRA^pC$^Vb%nFUGZc zxEf^{k6Oqx_VZ>pJkv&Yc0b!ZatuiwU1Im>%< z37)Sbb@~-xMlh^E{b;FBT4J|W!pzs5lP&YES&;3<3z{@KBw%^$WU~+k1uVcp78F22 zr`>(CdSBS)6#9BC9~d!los7%?};oI}c=Y1Q0?UNeVu8dXy|J?u0CfgcnPo#@k$ zV=SN$<77N6gw@19@wc|q2JAZp9K)dbaC3iXZS+0;bjI4~v-Sx^%L=21-^CAZx)_8g zHtX-^FEtm}$XC~^&n6DEi6u711hZyGo)AXM=7mwDi9C@Hz84b* z={;oKxYZv1&i?7vN8hE3}P2d4YP6{^a>^z7mHi z&e`_)lWVEZ*=C@RnvfN6_oipHargeA@v_G2_zi2(>em==g;;wPg5~7n0tq;}ioqw@ zADd=8uM2P-!qnb78iw7Gl*--GS26jKYYt{knKfk^uV~8l$w@#_vFOX0D3N~ucqbf~}pNpy(W$;4XF|7KtHQORDRUCG(=)!; z!W!8!@Gd%-p`Qk=tYfgxnljT^6L#TWvVHdGsVN2wjTBV_ZeRj}nN9qNn^f^QhjxXC z@fWD7&habOsI!}+rxDwB8pFq3-H%GAH|C)XqVtJ=>KpU;{8WGHf?9FUZ62^B!>>IV zq&)|Id|VHdgc=YdGf=NXHi?^OAw7Oj+*rRe7t#l%-qfHOZlZ|hgkXJ0Y7aVf=qDY( zgV=>pw*=3?_8qp{G4r<#29)={{rjH(iP>)@IC6nN|4K4+@qPw0$p16-_) zehJfp62nc19O!K!@wu&Chf$M$Hgq7bW&;)Q4gC=fi zya417X6PquV*|00pb3c2h3HK+X{aSwr*4^&rXm(x?u)e-LUx1eVdTk&*$48(~02GU}Fw&i7e~*kSjB`4Zy-agoO(T3!l{D(HIya2U33-1H+E@TZ=%$eE|o<)UyEfI|?

Ga`x?ljKgAQl5vK|eBgWo#QMUaE}G%}Oe5>q6;PAN%gc5mlz8fYl4c?u z(k;^U4+(I2Za1DjJD0qQx=-UehHaCvX_3P3p8pt9vc$@noQ=Sa8Iz6Q7)m&h<##3n zS$m)6&>sz)7u(j(Y3y1~#~>fZ0Zwfbi~}DV!#FaV!g!LZk_U7F87%XPN6WlII`1>D z_+pt?U7>iXAyY2q6*)4QS0*{;H3yv48Q41e<}2GbuOvt^x0sy7;&!Pu$jK;gCtr+4 zGsIz-7stTbS{+}?75ALV5_?DQ3AdMADVIj6$858Ex&V&oQzmdkGvsmC%rL<55v`RU zm4~RrdEvOc0G?T|fFwJvv#@mi%lLT%u2a{XdOJw9e~KF7(%J3f@$!~Y!a^x(Z)geB z3vjwf<2DY^ZOcJ9_QgFgBp0uSjh9QIQvGTu*z?rS5j#9kcg}9d^P{jq)zAHw!-4B_ zPp?__{Li9En>K9PfUkEBWVGsE%x*8o%kA>=I9`4OO$nR1C=crW>v28Gb5wJ{{0{ei zH54K1GvcE~tLB%)S%VggdGOp~%H7peL)#Q>5666TaHD+}fJ#rzi9Zzlycj?9AGkFTPGhhs;4^gcTI;W`M!^&^ z1!Kf*9%?2pxXV-b4}?jXn;%LL>|Et|R{f$*HBTakKFF9qHr)0yxvoWrR_>tDJm#HN z3vFU_5Z}zg%S52P5$fA~nrN*F7(FyX2yJakAa7U4^-l;-rL>j?0mEv{rs~=_Gf#{& z^)z#8>1+-oQZdfd6P_o=nOq)!@;ki==kjJ7XR-qi*bd}2&TOTzH*GTW)VIZTf{X2X zQMS#!=)w>)Pu&C~KMs=i0(Ov$bxO}c?!PVXHY~l{VjS^}7u0C$OAhq*S~JtJ0e-Kf z0e(aSd|W1!kI+qGfRCuAdFqe25(cgli0it&-Rk-f;Px3Yz(>b>i3Pr*1wIrqJlhhJo`kom^Z+!1?(t2cJhqJjt&rwGe^H2 zT+K3kO`E$BIxNR!y1j9a4V;t5;&lle zvdha>q0VQ3#rvpd06cm(;KSnqE@wjm9JqLS;H`O``W_|LIBfCu)hL&tWL3HQK(b*w zQcUFT6@cvcY#7*^91`pggAq$GRE}!S!Fu)ZRbZbO_5{3vFWABB4!~D!k&DXBOuHTN zzWAueEWA5LB;|udQa(#0<%3Ah&!(h&XG+S?=!_;FPXpyQ1Ng#dh2R6Y>+uXEJs50^ zN;Rln^{0sOD{yT}C4ljB5}N;p`m*C;m&{jMhuu)$`tYXb!XpPiSANEXXegXKRjtH_ zcu8_!V{^YGxv!~jU5UJFCGV*S3q_-_n*!Q>{kEZU!Wl!(SPLM2kxHM6ly`x768G)FkwnB#%`7QjJq=t6fV2OaQ$$Kj?{LITLr51)b zMb1<3naotO&;i-0=<2r{!rQ@R^{gDGYzJDo@auB(ByQg#nB7!_C_>9IA?3IMYr$P}KppFG9 z3dn>*YLKD36XmGSp<}^wYd*D&hIm+sR+dj_FW=LAQoU%jj*`W@Ur#GUb!s&Y9sX@%oR&%t~|p-3KQfr>3B^t>6o7^7V)PmbYjp zF09S_EiCvb0ZVfjwWz9g^^p=)RioBJEA(r`J3?1F7q>o#?^jZh79tcJE`5eXTXJ_w z?j}F?TRQi}$SuyU=V1qgss0#l7NCd^Q+FNj?tu~ACq(>^Ofb3y;KnXUUZ7dH`(f|; zhF*!iEh4KN*8f)7j)S7Z!J-l$hF~w`h9Q7Q01#W_r3tu769o-T%;DBBM5lHm3_*q2 zFa(KNFa&Au%Ok+p3T!Sg0cI}4$^q6SK0E^W$+VHl0ARTQ6w-(ywiKAanL*eU?!tX( zD=q!!SnzdZ@K4%=4HWU+91-7%J;Gdy?*>qOH=`3V{wBiSplGz3(>~&}XCAu$X8{u5 z#3h}V^rEEia=0DcdfV}%k3gQL3FD}4fqbm?hSF#}@+}240x7p@saW>k))2LCE{i7j z@wWAiUDrzr zd-L_?jQ74~cf#2M;Mfly&U!}tTPAQ~DBm^7^gb&o-r;QKczz6Y)G*eg&;)=bY?Dli zAF|!r-e#U=p)gpPi_xA54*)|FP&`LaZ}D3a>nJ)660Rj~7&iU!Z1s?mycldAMprRq zCUqLRF(46LmDh8fhj&U70J$ldPJ+nTZsE&TVi-Rxfa&49iW<}Z?u{P*9}g36sAx`` zc*kLsp6wpzOhB+sw!qqf?<`D%`AU+pAlfqgO_7p;dVpAb)WbSFy4fs&hf^|A^d@7c z>?WHtXEI;0ccaTikxV!7|N3u&U1eS2%jbJ`*6}*r#-O;pk}DxMlm4swYz=E_nR` z>3@uzsGd(j5H+$>{k6Vx5wP4J#h9=~f))G`!CUjxFAzF4P$y9m0na?8z9Scw`~r$A zq{1JC>0wd8xAbb&rlJxW4E>;9(EJY-a5UeDy?>;LP47qeP+Xj+I!Bxf(b9^BPJ`QD zS!8kh@zwV;koN09d9#y)=ggH7lj=S5P;iCgOeh0LamC8Tp*{ z@^UtI`2Z>f-;~$s>e@z{{Jbm`u8_XHEak7UeS1;*c8#?AH|g6K<@rS^@%1Q!dGpk(vX+mh!m(V; zb&WC{X5b4oL3j?28LZ}SM8Moy`8Qs@1a<1l=w5``d$C>$d>H{=`y+Hl-T*bEEpMP) z89qR+4Bxp9uh$^Vzj$V&UG zv%+Ho^^k;~{VD^G_g6OrZTGk)JETyJ*Io(hGwm@#E*oT}lUuz?Cv;J1v-HfbRCh90 zrD<1bq4f0Atd#o=Jsl)HrHZzQgZTtZL<wLIC{Do3Q}q~hpw?lo2dQUaZVL}qg&j6X{i>dJzPG5RI@ESBcX68g@fPT0 z1H{Q>ec!;T4%E{KC(7rvw8|-5x46l)+F@D^(4QvOgGpyUIbq3uni8B*c9d))m_4e#qpk52YeOd4d zj6sTVNl~mat5~~~!d4!v-fN(7@K&`F@fL#LHozAd)?shRRT$SIYdOe+z0~sv zFpqIBZblkjUkyuE;{8eq^Zj~QvH~yHVl8^97p3rJi_@xaI-I);Qm)PmVNqJ3p22Jx z>rOFRD8#oFa{WOE%*Fl#&EC`49PYOZde+i_u>Z-JH($mua63aocm%}i+{%ow2;mSYjbIcsf^NN&LFV-Nxx3@c5SP=9s3F4>ExaL~qNOmx2QtDwiCU^)2vVSaypsUw`>Ufthp4N7ny`707oJji z8KnM*b>B_x){F!Dg%UpTL|%zO(9#g9D_kxCU=jTQ)nDBx#cyp)ufSvk>c>Il^QHgC z-n+obU6ps^v-8_c1_!n03TVAW6twyUz6>D11T3ae#GrVD{x{*- z4cxDqN-qZ=1dFvubQ1GFZQ?C^dv+m_%k#;m@w){GzubvK1t|DM0neM8GKVs2kkO2F z0vdvUjBmKS>fMPz<8QzT1CJ5@&hf5>KZVVF7B{!unpytSpCav83O2@2uZVf)1m1}! 
zBQh^}#Ct6qHh%(zeGd28d>o08;Q1c?7a<^sh}0H2&L_F*<3N)1g=b_oBNyxYs3_r# z>PWxr494_uxgN#1(@W0CSP+SpVbAH8pbWZy7|}kN^;^G(M*pUXK{SFJcB$V@*hY-N zZ1UcC8@qiU9)k(#^-^>X$~+7*4M4vh^ub{>+C4OrcEfu86z$^ipXrZxy6ygzJ_+!B zARYJxXjKiD;6qBA>pdu>>JW+dT<=k(O&<|L^{w=JuHc7Wzdrr0CX%LKc?gDzP$t6f zQIz@BZ{6wDKwe4tp1vNwP3C_(9u^>x#Y;pmozL+eZ)S%F@w~|c`1Nz#fC!22R+0GR z`|sYmXCE^DRes!YE35o#D^cyOxAJy#z8-)IX<@WK6`5!pKi-GwP=Y+z3JL;hgfc(J zt20O`B@m=9;9$zOJ3o!wS00KZ5WD(9iU>5SUdRhFFJ~Eo_7u>N8?SACHs4cW7oXg@ zye7XWs@@|HFFpT$0JS*YJkMLf)p`Xd_BUYDbHQ(Uw>(l$^B8132ZtZp<%;%sa@1yy z9JQGz@f|F|i62Dhc$4>|3T1@*T6ys2&jnKVwc@=Ao#lNxOQbAE3Tcz2yy#Al{Po`V zQbS@8qnB0bto}v8vpC5Yrk9YO;OE)=sdQ`NR`xi7J3OqH&uK3g0G8^vWrcIbJvham zjYqKvGcbLu@*J<2UKUV`u`t!(ccmOsn;;kgaW-TUeq6{F?%u__#=n-5V9CP}6i($hC2W0zb@>w{ z;1z^Z=*0hmJo<9aDJ#8)wzB+7J%lIA9ArDbY>?HXP78Ps z{Y6W06a7lNiGDq1~oNv+n#y!TciLc?ZKVCbpI}aenfwy^bT*K``K3;a3 z9QgphM=su4NxpV|j+}O4^SnvjHP5TfdD@8^UQatU-tagRvLM!%f7iUc>+%(wtAE-l zgVRoT29Hpj?H$3=p5vXdy!mXJ==qg&X#X7WzbknAya=oR@LTs|l6=(dWXH+3Vm^f9 z7^mSTd+ubO1Pp-0F5N36894)mH*Y`%!Ik|pP;*0Ppg6Z9$82ulGf;R{3TL3^;tbSE zcLs_{dIl<3CR<2b+9Xn%0NLa-P~b^|Bl;+TTg2cD)MSgt_wsl&CiXP{OF?~`nF z8}F@cO_|#_+~wU~0U1(9iF!e}J%t>QBD}wauV#Fgn&v5ZUcO99wx4+%D4E2V3Z)nEyY%;Ag^JNP$5uT$E*80Gn z?oyI0*_e97k{!&|w`60gawptZjZB|mmx+KOe>Vj(1kFQ)pl_%20qzgWuW#9}kI1jT z#ji~$`M>4YS0J07A~o-p-(R!bKa*eg`?;Tx-%y(6@$XN{uP-8Zo7Bxw@_IS@zg8al z&RmKU{5Lgw*EBbMtJ(XpiZ*@BK??+c@x~=dld#ihJkdw}k3y$^2H-#azU2Rj*_rIpyGfnr;`dICmMSCt z!!`K87pN~eOp)t}aHzQ6e3Ec4(f1nebC1xVyh$l;uTDk1;gorQ)+&FQGZ& zLfF92oR?7bzwWWjAzrlktETi)8g@n1H{%(yKe==L?Z}1kVozZ8usH0uX>c|zzaOKa zd^Q@oOB!0w5#64q`Bl!u{qLRQ9K~0Jn)-L#@lYHSd7!$OAQ*5l|jKV8{0f-{em!QD-U5GZMGe5WlMe6UYqPSMv zB$1Z*N|BE-8;C;*-&utmcV`v8qCqLoU=DgWv>)J2K-omETo5ijX|W(oq=1*( z!yazT285e>$kl#}1A9TS|F%0J3_v7*K_#7{h01Ek%C3j_BA7)Wy zA3;l@%hZ};Tf#yN6Qc-SfLLA3VR!)UoxQsEe}qVdqaDMwbeCd)-2M2$fJBU3=Zd)? 
z2lZV*B67400}}D0G*&B*)doOwb&9qXk%P&iWOTm8QG~2XtQpd^0)7sR)}qDYbOH1> zG-9ox1LoU;qgx9VMlb@N+9%G`_{|R>+74Lf|Hk)1Wi))J9jb--0=8;0U%-@H*u4Tl zkM0OTGQI%%Weh+{C>2Km#{lrvF#tS-7iIv+aST8k6b67ojsc)Sp8=pm{EQBhlmX~4 z=`4t8uZuRUi&f;Y1EuId3M_UgwlB&{g#HIDV+g2#9a0~> zNXEpW`ddlfAPi`65bBQ>Vtfa^#P|+i#+E4DE2b%;MB)pH=~l)V1yc|%8A3z!#S}j{ zHW;2fGZ|`vI|LusB7Q0zK=;^7;XwdB@n1sRA}3oM!QLPsBGw*EFwT-NGP0z@=U&M# zDG%{EY47iv7$)<9o#`!2Y5zF?amTgZ+SK$0@BB>d4c-qJuJ{UQE6pCFJ}715L=(d& zPC)MVytm-oWV81ZL>zfI()w6C;yYg8y=7Ns!Q0DkSoL1|&a!k&d;t#CdEOtwqvd&n zJn!@tywL;r?%Jc?X^5fOm3i%&2SCL!v1f7AZ!P0~%Cz?{&q`m!K7S8C5fHTZUm0b%JX5>kE_e>VvGaap z-P!aQzJFc(^rw3_a$W95SZS;GFb#X2_rYC^fqNMKGK3ATlyEygPEf-JS0d-15NOGW zLW%bJCye@x%+HYUb{te@kRW5a-o6WExfLNH9dIhw z9`l}m=K}~JyxnWR(_4e%%bxdH377js)BPykAgm9z%rwEJ;c^~qeq6&Jo(_Ee&AuA1OMS z-+*({>6e^?a=(uNA_V&0%CN?}Wf0RGF&^SX^`AARyP7b-M{q4m8uoy3JZQ#vrhEC; zBwBAqv_v#_eUSSr#O-+>hYE~wA+nbhLc{hxkDxzXqlTd(;||{8gP)J@%pBeOOH}w6 z&WN80j6oPtmv_bQ;la&?yX#Ou>pi{EPpY9QS2ObPcpgdZ+S zkl}!z*_F8;E&el~QbX-><`B*wq34I27zzvI+?ifT;`A_1`RLDU>?e2Ehj*rjnzq)q zR+`UFFFpOP=5x{=tw=ugPIyM~@W{Ccp292K=XyJEyq}Qwjx@L2+B_${3Z?!O-ZMlv zc7GaIh& zqs`}ezq9iwNZH-ZM*-O<5Eg`19#h==yPcx@gd$Xlh=dEeVYKU0>gFu3l^DeVk|?!$ zD+GVuy$de#nPb@9xI_2s^cD3O=E225v)%84cb@D02u7&1e`Ro8P)>{Y7qo*-frF$Y zpKZ#lL@?e9@Xiojy7Pi4hrlPUd_-)NP2fGoMB4KS$i9*bi~XlZ5A8 zd&S331TU1aAWI z%tG4xBguH^wny=@aoYO`Lw3GCFMTNvXpcQ6p{OW&{I;VE7kQs2)c^1C>n-^CUj6yV zZKRCv#hde<_kFm?@#Ebd5rID+m6GrBfZwnX-bRM;E-Cq=+dz!c-rI110y#gr?f%;{ zXXE$h5DjbLR)CmVTw#4B^ib?E#1d@~CiST%#GBF5DhuX$KgOfWQXQuj{fH-Gh144V zKgfFrh_POsA@Ngx)0%z-$M}PCx(A)!m>Ge;>p5tM<9Tb-_y+u-K)@rCh>?2^?(EFjY-uU+Kc?W?yX!wnY3c~6j6gBOA3XyuaR|Mf=V7t%rzIof8%p1UK_hf#N z`IygYp9faUwIZi|9y#rvOvm@U+Wlm-_;{~j@rg-_qpm#P%q||-IgYxIc`uT>tbRSK zf3R_N@OA0!Wn8Mi;v2-{5E5}qG+OK796tyOghgcB^7Fv>cy!+}zUPtg5ub$dHO&1% z6XRGl-jm^F-%kTl(bDVj+u9N~`AObvvH^ivh(!D3u0!GYSi9Q<75*4TWo(BuLwphE zMg&0W7kO(a;=~wny8*vHw-ZZ>gdcNmwwWe&;&L>k9T0)=r#pu_D){x72uBB{)yJ9; z#Esb>gxGZUN050RCLx!q)c*y~`?T2E-EN;T`%}_$zfc_B^XBf>-T>l#jMN8g4K&5e ze2~2J!C(1Iv?V>CVhtkwFZMkX3hD9bj_O=o{AT0^@$3Sj^yO!ShTwT;WR7;s-&$F{ zA#>KJt2g#+ez{kC^r36FbYyPy5WKbxXKe6y4DQhBRuatkjT>BLlCmg$I%YH?(=@d{PYV&2>eMC z-hoBv`1#&{ASgHOeIA?TeDA1=T|eTnhx5I6$in?ImXzODZb0m6yLZvi2OdEAuW<2Q z=)9u$JntUvSTK@97!~eC!#9rV3=j%e@_gVJyL&UQp>c_x*U5n#02}2#^KRUee&KU7 zU*;CWFds`CCO6lQFkSahI*_P-#3Z9XgcRp}9Nv`ky+?nG6!||*$UNVh(7pDLJk`;- z2si6LMtkyy&dq0AirdSDb85UN&|8UYFn>vL(*XKZB4hEG)f{Lt@n3cr?T`Eg`0JHN^B3hM3M}VCg9gF_mv= z2{D~3A*N~1LQFFfVw%JdVYv0yX3Kddf6ZX=lBUd!nPbYR{u91UP@dbNgrvQDvG?Y9e<6(On`BgY zWdXdqnStW}{9DAoCj@61zxV~!Yvq8S&kN)TWl~e!= zzwgd0%yeoUd^<_llVw^%(suAihogTnNC5ebyB?*b>S*S> zneTdesJsRIU&Q~g!W@Ettsrln2RLl(FgWl^@)tb>H2LH0)LMA`I0RYTmWC%Qo-G%T z&(lb5sNlR|9A(~+!A<`y#M*PvQYvdLeLmH6xbj6plppVgqfYw41sO-d5`SCP@;7U*38DH@%A~1)SjFl5@Nt2txhBX#Y9h=alxplLm%pF7#!W`wJQv-kXLD z-LS|b(xV&#*u$XzsDKq5C2&F(XG1cNi$OA%i-2{G_Z3%C*w{gj)~!Es{Zx!w;26Q49pJlFfYV&Xf9i6@au_VD*a zLOqY-kg%qB8*OksIRDF!BIzKe3fc7yK~1`GI&)+ zWe=X1Ly0f##5tYx<)k89^W#?25OaBd6dan0I#nLB3SgKl$VeGly zp9{u9Z`Du~oNuBuk3O*@R4l>>A!h9%vfU$m`1uq# zPWo)u`Qg>pIn0b{ZyZR{;++sA`5BO;1?TdFls#UDB*?o@&|{)0y%t9(ot}sEd#U4z z8mHl0uB4pc2If$^{4Weh}q z>326hhHXJhhYaTve8vg4gI*wl`E!Y&ud;5R({v-@E)B%~)}=)gqQ4r{#mGOPBY&>< zHaI$MY8wwp7$ogY!Vp*~l_K^q#(wwr8BF$w=D`Uxcbvqi!Ob8?iXs&xT^0F}999)vDQ#yeeF5|Ml9GwDy<5f+1%IYDA55qOzL)m8luig) z2OMr8iay)0@vmDMH}pw(gC~s}nw6-WS!rbUE<1ZB-&2l_36MduA0UI|a14yVDTsl=&uG*OfD>6A z#&j8MT?m)PqcG?N{e&WDSjBpRQB79;T~=sg~vg6fPA6#(W6be( z!+Y0)%YSQ+kL_iE$`iM(ge=A@7#?h+Z|aZW&SW=QC>bMZnYj@A1DMUaa7 zN5t{P`!Z%Y?iXd+Z>*$Wd?u!~4-ex&$i%Cp{7z5MFK=7#opsbJ(TBJ9t_L1ZuRO1L 
zp7-x?$)&wFz>vuAHMoj+{`b#tThOU;inxVW(4Qt((9e|z_4LRyj`-S<=Tqg5gSm3Y z!KtA;4sd@Co)gtmv;6K35%W7wYV$eK)4d<#o(H_hI>-Aran;WCZsffdFDA%uq@$8% zzHcM?1c#1yt=t01@Dyl6SRK?)L*<$)7uU~&6O1+P!vS-7WAuxez22LjB?9DMBckU^ z$h*5u+I^1;!Y8qY@z^b8&CaIKayX5cQDuY(lmrc03#B;nI6{>4TJF z*nX7_x$y(0xYB6HdKn<^7~nx^4p84{(IuWMhv`oZ9j529y2JGR&dpZ;fGGzMx46v} zi(89W+^|k+al>^9kbzs&;)d~v#jPb|abruVTZ|e^@f0e0z`<`t)L=e(IlxTj)8KM| zxpFxGn|FuqXTF}WWCdG=uye1yV45e~G|%EiW-SP)V36GtaDz5^H}R#bi*O70d5XLn zhYci2W$$F!2h;7;-~<&`87hR%<%geNnd?8l60Ef10^dsH!?MZSzHTzf=kY;3ar0LF zvbZ-%C+ngyWoeo8O;S$#n1c~F?TgM`)(yYPM{w~#+aa&K+yimP?ZPb&W;$@E0^&yE zyOo574~tGk2X*TZM3d3Msdnq}5nIpD5}`?6odA_Rcs`a%dUe9nv{bx0fgB-B5rV4| zp01^8TWc5^JRb|^*XxG_Ye@e2d~C21`h4tUgNc(U|M}Rt@_cNIJRf_Shv#Fbx5|3w zkFN63&=!tzyOz-Nv8USeu_U3K2cM7SHX>m8d@S(=p_x%?;6ERW8%hwZ(jw2tB90c} zV>2J;WP2z4eC%9#K9a)K_mPDW6D=YxdCXW}=UpA>irKJ<)tj_XJR8sCz~aVgQ#KXqxD&fu`AB zG9J6A>8Cu{@NDRBO>h_USi`gFaKD-I*10^L_o~O*x9~_qIkSN3h0z zx9zn|IXQWIofe4jnFK8`NEI!xiD8*TnTIZ`ZFwMl(OICghzYw;v`B;jAiy8d8g0Dy zsmc3P>HfX=c?V84G`1iNCrDJZc~fuyx$TL{Z`6>f+Zw8s~B;zn4;g6xPfQ$%X#C?{xmGp@4@R9dIK-= z{&44d9EI9*fQo|M)&oIJ-atiLe~jaH zDES8n9+21T@%@cBRomqKo1CzHqr656m%p@0&qw5%n;e-+=A8MA@%}vGHf96#Ph}ty<#RK(UOM$$ z{C%ddv1bzVndJ+E0@Ad#SS|8F(*!;i=13rbGvY`<17E~6SrVK^nU--R6sb2(AklFo ze03rwB&4u$0y)k&fv+N_g1XY!p&WwU_#6o(;>HOk1;&X4j)Ve~Jkbnjyu0rKdVZ86 zktrm&n(Md}D#jexs2Sr@7<`yZp-N1OK*|(+D_& z(DQ#lr5RDU21+z&MuHNpssHU{?#UjsCUq0FTh(m*j8Z?qt6n6orILA!Y+4X3r-v+eBh^11?R2;PzcSv&Bnn_UwxOyd8NAjK%}DI7-X!d`cES?-K!i z0!O3~tlz{C==RJX&7|<%DEt7fgrWVFEk6K*DSq5jIRhxE>6~brp=1+w{4W)A70$oMYQ4;-iz#h{g8vC0?KKU3t>?{iXZgh#w#I z9+S@XwGjVgrH1tK@W1!fR#;JR{mGfZ8&3@jyI67eFSG?dKIEHtC78aG&pfHw7ny-k zSI+0)@w1)j-(UdDWIfHx2Q+Dh+57{zP(#K;z%RqKQDhDrt=Mk+h>KjO5z zysR<~k3GW=aN{WgO)`i&;*kKptb)_hrw@65fbtRf^`$6``>_4zl-Y(Fc28t^+*ev0?OH68rHGSyrcnt{I|T`vuh6& zSRX0ISxjQ4n!JCK-rgy_x$gw%4<-FO@+RU31i0UkHxWNj=6UMxJd0puxsNJT9}WT9 zl70v8Q5wV|y+>f85O@+0K&x*Ere-4~x`am+{{9mg;ag;g2-v&anj^Go?_qN~*KFDEMR-*DHY zFX2IWbkB#h zs^Kr78%RP8UODHg@fx^l&PF4@!UyNKx3|rkn;D03kRPV=bNpr9^6w7$8>DZXuj#)Y zOCPs%;ZewQ?HsoB7hCxY?cZaT{_ocBiq^J$>8o1L+v{D^+Kw{kyK)P<<=b=ZH_GiL z6kPrZ%ikQ!cl~c%*ey8@a;{$4(w`Tr=kmubzt8g5wzluZ@%oe-{|U?gyp4b5{^orx zYYwIlc&l4G_RjIgH<8u;e;Mm%zWtu))co5a>G892-<-X3`&-AoJJJWe1I_y_*>A6R zS+|tCPX4A+0*`B2JKTH^TmD&=@8&aM=@*6T;k!Q{%im`CEBASOn^(8C?>E4?_RE)c zONn!>oNIsF(hpj?TPN0QglAjOcKc=BQUjWneAph-J1o7!(p`HTFZ10Y{&!gZ$^+?r z&97?h*zfHXkooP{$@>?E+v#m>cljHwpOWQY)!HWCIBxcH%<|`1zU$v@*Ed+Y!0S5n zk2?Ig_K#crYc-kcbrn!4?eaTb-Yq%t{N+ex|0itxxn^zP%GS0kThBWP^uXdAXwJ31 zbVX~&iq>UUwT?IKox5+2Lm1*0>+iPq-+}uZ`7!)D(tJnyK=VP*6&E~U`Sx75lq(nl z-Q0R2<4}>P*=Ripmt`1Z_iMHODYh?i`{rDG(A%)TW$)ZO(rXShb6zaho7Zx$Zbbq< zI6Uu0k-w=FfGXe4$6w~RjdsiLUz5Mg57spaA6f57 z)a$T%?S4J|=JI>%+KuLyE&q9beeKuPAGiGTV)>2W%Jgwy^bxCPU=aLb`h=w~O=yq# zsZzJ(%(HygUYn)6eRm=9S$-ut$ywjZ5Zo!M>^MfUU&tsY2BSwoa72>xyk0a z9rmg6?{`bV`?R?55vF&)SJSVGrH@| zTY9{mjt1qs8(`}^$I@MUhb^7>#*f>VF8`S2zf!a7^4s6vEjf2u{+f_H z!1)}r{G*nCZ7hHM54t7C$u;Od0*|&2X#Sg|l<`-5=XeF4mjB0=zjA+C_~bsr@A+1) z_k-P1;;mLr>Dzw1sS8TAulefMmx5`7Rk?Yeu=0c-KVg2ye%n6OE$JVXzog^Mdt1Qo z4y5->&u)AZR_=HFa^T~u0zU5g>;15{=f)S7Z`j`n%U^GFT=>>`SG9JF6a*_4+Gn}m zk7_%gI72(Er{mYOo=BkMoM#dpX8LXrrM9Em?rGYr?B7P^kFflwwY;NqoF~)Ut(_$L z$o!3#@9MFCrk5=pU**GfW%_PQzr)gnpMtOLZ&^b&xvsTaGh93EHm)$eX*7@Rm~7jR zw1Mr`KHFh>hqZIMrOP@Zz1z~0=Er)wV{mcpja#~#pWqM5yK9w#`2o+?5X)A!QWkUl z9=7@y+jt`Q%JKfVTXMb@-CtB_c4a&Lxj$1j$&s~3RKB*Jhj&fVu zuNcR4@1A8_9X-#o?6 z=C!hj$a4Mdeq7714$24er7w#)E)I@v<8pBC{jAoXr``GZvC+~mw{*8&uHO?s*LwHV z%`-07?vBW{uKrl>~kA42B)$jh2)?a>-{ZBlp^{+I+1oNj#0L=h+;;$RlEA8fh$>QV2zxylQ^82lk zb;tZw*oI!Qzh#wa7Al8JaesZ=Q!6Cly_%c?VsS?(pCley{tAQp%KhHH=IdL#52RH; 
zbK_`BYdar`jALqi??S(V@6B5Oqv(?##RJS^fAazFAdsH=(cU@xTGlFMz7SviLE<=W5Haw84OxqUbOJT3RmU>?|aE4b&l`M~UDcs_jEzt$}YKWozTV4q&g zIzhRwYq@8ee6ez0%icM^+1gD@!+zk78)x@7CvWe3bR+cTZ?)XL_|1>omo7i`t!_z= z=ezZqu>6k);|9IZeZxJ+m2-Hrk*FuFh4AiSZT~~Yc>MLUJSa%ikxtlD{wAOAdLRXtJ){RQi}J*VeA%bNfi-lGnDDDU@(s zT)BxCYPq9L+VQ5=?y!0<_$1@IP|H1T<*wh~az~m5fL8(cV3=wIPo{U{Px&QU-@%J; zVfyYDYkCr#nLlCquAXb}n58>7bKkN2aZ3;66^yZgJSP3yxTccFINr=TkRPmk(s-GF zEH+P$m+8kX-Hn&m+40=-NB3SXZnPtpNHT4 zxP9dEk2lC~G)|@;N4u%4j?>vmU3zYEu) z@f zeX;hN1V`52X!&6{GQDi+VK_2<;+5K-^LGIM#7{wKnqwmzS?+|DTWsUPe8}}7d5hfb z!=>XYZSTca&f&9z%SOvjHE99mj}&EAwzdN@`&#x>u7!s|Kcs^)ubj5~39F}iDUG~B zup@R}mT$jC%OA3KR#HxZ=pD9Sv)pcj%O)!)u=}M%E_~K=eB`^=!@y5Yt;s$}Uasx62ID2F za9zUK;?o;fuY0-Hn>WLHy_cU{y^U9By$=U^#KH8{#B8h=w>~I9F0}vMINMgttRCUq zh>xe94>;eT?IqDs=9evhp23^#F@3kCJ37ksVfwhGC)rP!f86p1SRS91W-4;^dta^N z`AyBP%Re$j{_&f%{&TFp+b3?^DZ9qT&ByiI_8VI7f>=G5-#tZs+43oJ@Z$dqj{Q>*g>_EV~ke^ZYs;J$G^-sK3 z;dEMnw=_994cNCkksh%8#Ck1%o}&jsZ>U+ks_<)Yf(J9yO zVM{mlwc-3&{&y~bele)+gvZbHK4%a6C0vi2N2IA4};v-Dy7 z=12SvtI4n-Oyalecej;`?{Chd(fVz9A;x3%lE%yYj=~Cdn+lJY>D`v@#>;Uqz1Py$ zORXp!Vg3=z?`@Eu8q)sa`GkkGZ}~rQ^+RzV-9P|Wu6wh#)2Q5JaM}GL%zKO0Pl5~c z%a$L83)6R7dKfNDAGh?)ZrmaI%KTJO`^z`T-`F63KFYKHgylQARpkfW&%L4j?3ce; z`9+KwbzuH(%MY&u)5k5{twW=E zu%Gfp(2FPOhxx}7`eFKsSU;}4)QHa0$xmUv!+PygVvU~N7Utayk7I)6Xc72>c-n`aQ~hu8mgyppIi@N zm_|0OI2pbiehK)3^^=5`%x~MN;|Sv=)7vfG;U&k-^vTwn`O{f%mYd0X%Xp@_-o!hW zcb_ItzpW?#CF@-c*VI9c&ttg`o97R#g2l7>@ma3Djc-vn&9U}0j*mXM)ve`Fd+ocl z+;?1ibQG+k+N!2~*IsJ3miseTjsa=w7@Vf%h=&~C$>O1_KVkjkOaM@M&9uxrU`c`h zrU&MpIajwX+dFq9eDFFTx8CKq>p0_h>hjy(q4{w3F_^iwCwtULk z{J4BK?y}{F?XPTix20d9)!?61`>U%rVfn^xL=R!TW0vmpWLN$~gYv2OM(0KPz;@dI zO4Gk){p&t9zpn3}cAPfKX9X_S?#Hb@yn#*p>fmzB@{{(B)IX&C&$Ie&9_^Oy)*nNa z@pf4HjaFXpANJ_D9n-ZlZskw2@@`y*Ej^;IqYUwv<4XOtj_Vq$hknFPY@TAj+;rkr z*q(b??1k2z;D^OGGyj;42O_$B!uqU?=Y2Y!am&A69m{*?tO4_A3E(|$TjgLaNvexva?{OUD0K4k4EeyJbYMMOjIpruuv(pJum zxBPw`@3(_-tYV;Ad_VO5ftGW4=IVERS=0X|=zl-$iTikd0rIaK=eX7X+f%e4#;N{t z9hJmcfU@qJ1AFi_trMp@Lnu*&>r=i{`zbYP{oH;?6U}?)ZfNb@Hz#BdcIA)XHNEm~ z9_?Sz^5fP%?6TsIq}iEjn``&TF)g>m$_d^k*uh=9)A3si{u%rx>|f!(7H`&#G}m75 zpJ@MYa^pPU?P~!L4yN}v)9&KR9Y3PwzGCHG4I6tiZC=*CD|h5$TJD9eJ@cC~LX$<^ z`W^pMEq7q9HlXWAItG7?(lNy`S-){aEND5`Px;eY|9aPt`dK1!sO;0vXt~u>lxzR< z>6I(rtL3WJ9{4k)(7i3ITf4>6;>LGkVtVDu_i4EsY=gk=6@kCQlVhLLa(`)p4&wj& zXzJfP_csuWL}S0g$ETNH((*6YWb!HBpGUYHkLBm~dN+`jkq#%>m6$%!1-J{x1Enr8_=Jd|`V>EWMHZ<;snJUB}r-o?}0a)^GO`$S>c}`flHHK1?6C^rZPP z|Cr^6=fm{lmL8rD(@$9XrX~e|og?aX=ZH8TmfQGE9nVzdjy*ta!I$Nke%#tah>d()dg?nmuXuX9rJru)IWD%-Vd+V5V1C*1 z!*F2wZc7itf$8IxezEoA;6OZJzI#sh3qgKeoHWb1=XKvSMLGB0?w%>ixhHt9Ytn)k zS7N{JQQq7X<=iv9Qac zJ?VYw_*~bod-Hdu{kr<@8Q&q>kp8-cWi|~qg?(&o>qdivt3P4z`J-g}nv>gc^75M2 zc#gIj#xANv>0{|{qu7+*L}-(O$%D9BzLpUxw)zPh*;=iNNB)jb3HsRrd7 z-lu-9<9&z0)yF@J;o=HP9M`JWZjQvw!#(qPGJbf$J9~cfg~7Uq@yhqZJNUa7GygJ} z&)yb^)7OA3H?G6~uJfte$38ANJW$(rbKW$Cg-ep?MKd<2C*K7ICFN5`>72U;c)y0(`H+Z@6x)(0H zZQhOW$2~gP(V(2epYDu~_Zt!XA=co0!2V{8XCFeyI)t`!N^TzR0m+%-kL%Yx?0BW= zZ$92ma-M?g(B7)!U2pJ3z5TQw@AaV3A6{nKmo44Nnc;J#%s+lJ z{@`#aXIuLjeU>#tE<5%HPz%LN_p zkkwyLrJHv`*l+G@mYZ%orR#UHctx3y_4}mtBXI=3+1g8+7tLM7QHHhP&%HfWbmI)gQ^@%2Ev5cgzJuFz<4WDQ-P=q~PPOxc-3PqA zbKs_wI8K3|y^GXk^eLEMeOxBXyN86P@}IkLZ``bK88kYcyCeN-4NzS}C-zE2smhEe z+m9Qs2EDMiCBpg_)7`TM&i~-#Caw$fJFH&PK4E^@^27Ut>ANjGyib@uZs~FT%=I^6 z`8w;A??-X@jrNhBU)GxO`h<_n$L@P~3fg&5g5OLyz##(k_o`Qw%zpU;Ve^d$by@g7-;KfkSTNg6NnyDdLFUZ(e2 zx*M-+Z={tj3(7Qe~vS zmfu|5S{O~`t6NHy+NQCgRB3CWQYj9`i}hDFr}915<#BW7E%{n%b73?;+CNgrmny~0 z!7u5J#n4i|Fglp3l!x=Bat%$U21amk^k^Yp?ypn}mHgn?NSPHk7iuVwNBh-$adfDp zBlg?3;i1WG6Dn*&1 
zOt+nLZXX-17B`O;1_cT+Fk;+=%4q*^ii_ybg7X+HrN+vG{Qy3Zpg39#t(F3nuuSE* z0$zYeX=n)Ve#Zxks(Ie?Do9i*^bg9l+$kgvNK&oe*RShYdyVv{tA$I7Lt`q}Dyb3dlwz3^QRv0M z#6gSztMb)O>0;rQ(nw(;fZbmk*s@@-zxcW>{aCS){_Wce!^4*lr5E;B2Da#*q5k24 z5)h;`TwPclyk#MuKhiWT=jKYGS|v_0d4T|8bRo+w)bE#M7j!M?T)1hhI6Sy;b#ZiT z`@(8vAP(tD0YD*!Md788sTHcV{77lA;1(DgOZP_zOSna%L_&5s_q8mOz${uoHvWPI z3-v#YYN3H`LEZR>*NG@QR_)(hz$@;}w? zfC}{w+%i^HObb<1Pzai9QP8BpES41ibm~8g^q(&MXK|`rDQq>6U9#(V=?XPM2C5yAaZsaDiDt&@lfr!NDSqWuIKss(*;SN<> z0H3%B#Sg6)DAM3qIYJ_3GQr$PCGsV7l}rhA1LX%WDzG@;Gg2PTcIEr>#lidt=orwhnoDgSk>|liHUZbS4S;N-PBps-#Rf{{ z9fJJTT<<{ds)4?2KGI@;UvAy1K1o~ESG!a%lgG8=Jp+BUY&LX{e9u5Gx3n@?tpS~K zTeXd4?%jt=$I9hGWdQUr$Lr8*rLkd_Sk+hU%C9Swvz_@h`C=7>y#JQMAZkMz(U&Dq zSh0$wFU{vSkB;RBwr|fa&F4ocI0cUh_4E$(RdeNG3|HFjs}{HCMpCtrGLay)wK~v0 z3QQxgvK_%i1M zo}pZ}gKIqq9$gucw(5bi6s_b7(q=Km>5c+a!^8Q3evsP64R^9(a4(|c4m7<6G^RY< z59wq8fZklH?2x)ufRKxcMk`|jH2@=*>+8s0pT~M)68%^mo#IqWxVSP1EaU>_u>wTY zYVma#UmV=p>k`bOP^slcfH|Nc1#sU|<)+_E4Nzk82ew8QHV+ERwdIobj}X^~3a(67 zXkPteShtap0c>6@#c)4Xf}ktO;sFP3FN_upF?F@1vpS4iM1XTHwSl302maku08x}* z+0;%&W{d~2oefLDml__1%qj1n+=6F3fQ^uj`ebXW2#6p3<+7wfF6bXFZ|P4_qCi)X zou{J;w!tP>+E$~w2+V>5Ox}-uXpMQ(!TdqD>vT&k1w=Pj`Zp0sT>ZXIw1lzYAr}14 zO)4@x8^?Zv!i| z2Tp+0q$`Rz$#|m}V(T}YB7T4`j0y)BNTR%ftSa)=D_ zMr?v$0w@gZK*5nsrQy`jHkur;afkS)$UlxzXFCG~15pKHDjmh13azaPAWMMP5e5fj zT-%q|eWiHRS6vD{t5Dfmu-^_nLQO!n1iJ&9z!-@lJM$}sAqnNTV3c4TdA>P99hMkm z+}Vf0w7@oy>JzNNUt(^;Oh^QT8aRGiq&$Vh;ldCq7?snS0l@@ZbE%ywv|*gLP^-BG zW`(eD!8%}P*~LD>0DZv~6D6y~h0PF+6&vN@#-i~@6;%QT8PGk94Kkp<6RFry0rxA8 zq7lLPdTL8TOWfV{DI>a)Ir*!V9ya$;cv(jz;Y51uo&<q0h@K8GzP+AJV0qO zbHtv&UbB}H)=--3ai=L6KI!6;nSne5WYZ> z`?^E~BfsB;ivRlB6ecjp z_oPNdGi87N*heU~`%K-o07V8!sX9&7&WIT>q+weEnVB)QCOELQcdOfMK3T|i1oD=& z6XNV$q^I%3Y)5qf8?06wDyshC%Mi*WIP;O!aw?Qe;^{07F6Nr}qQ1bj zvjYPZDT6v4h)$^jZ9SKY*zeTjxM09UC?A%KwiLE2xPtYZ7Rv)JLMJpeA97B1X^1N+ z9}&rs6X%|*iWvhYZtT#t{c9m6Rkwg(3;@ge@|%ZCn_#NOWJ6222qJ%cB}LECa!$rp zQ^lnM-x0VwBAi}Yw31}O+L5$6EsX?hTwo}$k&GzIt7eHi zPV2As4?$oGZ``E=r4bn9L>*SkPN2Chql_%t2UwGsdCdBc!AyrCjT9Y2^6}J7#Pw=p zp|gR)c$vMCA&T9f(pd_IP$5>B#bB|DLWR`GrV8^xTUFQe^=DU3^ngXj3T(LM_#Bh1|)$77{7q5WkW8DCC97S0M+{hmxvo zDPjZxqi`k{$N9oosuW4hWgU5lsr(R4%b-)!h2o z1d(JB4Oc$dpayNbq+l2%XsvDrg}tJ+!o;U@ofJl7d?B_acrXGJIgG#%w~Do@%v$s& zlC(w8rGrNqfdu_keY_THO*RMBZyQ8$ZPzK&usCfSDU6U5=@{x}RfyU7pqRkJ>B~C8 zZ(v^;8mWoPQ$4RB0Hu+9|G)r@u>mHDi6pn3f*5!|wh1f%5DKaVq4S}wxhMj47;=_5 zFw*D-Dnf$>pr||F>5O3FXqND0L{dD6*U^n#8m;nvkhwlwdJ~-B$sZMByVm z8lpBibnhzB3c(#wu?K_g)4`@-xGKZ;$W1L+=@TC(QdkemC?VGRD=>`OW0Ou5BzA78 zh`*=^Dy)Pg#G{s)sR12_WUt8ZArKQ=`r+k-@(Gk9(-fhidP}jq%>eFW<+PbE_%Cd6 z&P~BC1!Y5A80s}C)krE(ks1_(^oHfEl?L^nA^qnT{l`(C81M7>1^U29M!?Y_VY7oo z(LB2tL#1KYdBH_QU)NW<8M{dIcUBEyY?5hSwVN`Q4-2z>r~>o^Gk_I|bpxSJR69i? z*tk^j!M0?XDs&@qYte(*3M><*cn!-$3B&|ZM&(<-i=|j@u{W^^;jvYGtCcpvatTA~al~5@8!+V;0cRW?;H* zq*7yzeGza3JfipkCIz?6RL5XcEI-}G1asmMm*9G~m zMf}snKa1JQ6xbmUErZcbyC@baKfJ}ZV4$Qp)1K^V>ibU9^|6U4M05&7q-?f-@aD0q zT7h#2Re(h}#k%N1by+EI6i0n#*bMNzs3nLnnywK?gd^Pq%0b`CgeOIXP#7K4~u)<6|C3NRrWe~D3ln)#eyDmd-vI&6JH)ra7J zp`8_a1PLv@FSPK`?C4C6Fq??PCs2-{ywD|C%7DXg_6^9TaBqgZ2vh|r#DXu(6|#jB zXE4Jd(Fuj+Ty4zSHJm^~MB0#6p&$k}2^u3p`W{SJ0Uoh*2Ymj?KojIuNOo~~)$m%-)@Ws* zX$v-i`~>I^s|_LI^pWV2gbSKn58}bpBErcS%Ro3`u#3fUiWPLOnF}uWzrIXGw)SM- zMq{5CENmJqC|$T6T)0(48ec+64V7J>NLV70J}-t~y^chJsvTVdj^QqL24(YYi?51W zAi9E?xbdos(?4ofB2PjX3_5o@tF+>iNABM$5DF+gI)Io(`pZBJfqOK`mc$K?63dVx zN`?BQSxSc!Q2a%u0kuy|#pzXQ@mc*eseD&iE%gv9V5oCJ1CD>t=3@qYAR`cfTB2?j znXg+P^MbHAsGr7t(*FEfK0K+_~Eg_E6r z+yWffpvRDl0A&R~?K9?W1f7D4MDT-3AsGZqM7XU(er(0GAd0vQvvNDpLEg5fn*}B? 
zS;#oK7|3-F*HDWBE4x-&5ICZVHiOii++b4@9`>@AO(Q@IA!Ug7D)_qDazD!CvdbDd z`g9Z4>r*q_ob6Y@*GCGdOCR=$h;h&-xd@39&tq;t4S`d^nG#iCy8*s~e$Hr?r1kC8 zuP-*jGCOmX;1ZCKf@r=G9yyuX0Lc(xt?&%>Q*$6@`%>#7EW1*5Wrv-VNgCL5;Ao`u z0k;O6=jvbD>6`8XI{UKv)ex6;%nK|&lHs7*%YijF|W5R?=L_C2xI`Ea4ATZ4R%khEQ7OM!>;*J4-a8Vg`elcz|=NEH4 zY5DR9fJ98|i5S0K$xAL_Nyx`W{kkX!g1cb^CW4V>iKr%Yn*$+RqS@=V&Q10pj z93VF-S|aEKQr?ule_^p<^*1$gizA)k9aSf;Ej5jZ6iDap<6jpgmzW+DroBrYv4{8C z-$Ux?RoGOQ_-@&$kwr8ma}xTX%XVN9jTi&C0`V&8cG0otDqNDVcDaqgAUhQ6t1%{x zk!@0E3Y=rtM!CZwX>g(pz(k_4Pl3IJEjo)Vhi6RMEv{ytj1nU?9X=V8G$WKn?xji{}9&roV1<4B>!f*uSC1 zhU}T4@zf#aGT$xd);(dHgfxbkgp>%Yjcyqgdnwh!a^c7Cz_;OI$v}b8kVQNyg+QUwy4vIS8>ASEHCRsRY9Z*0%utk z!k4eSThB2VCTIt6ut(!Nv9(}Q1b5^K7>whUU>HVD0{XH8 zL-Bwr0Qn(+G-yHGftwPnU44c$E4s~T^(7SA=gNUjdC3*13_Ah(q_Y4eM&(ory#+%< z!*KOl18QTlFm1(rWi@@({yy2R&SNK>7%PUoT<11q1c~ zD=qwc)TE@SBlulJBZRX%x(e)N>@NK_H214#EE40W@H0adhCBkl-YCT%Mw?)m6|*9` z9>l;Gz>kv*IS8B_Zcy=M32}m=HI5gK)-aOy2`knhPZ&gG`G93~1X~y-rBxY?TcFD? z*GcjFEjc~H=M1ah4I+epw z_rao?LCfKZ&+Qdh2*jZC3>_kpK@f0|n}Bik2Hj;YQn#*!sbp$WPeweig_EfVJ4Aji z)+n`UnI%GoWJktiNdgPdbcY}&C=yq*)-8%iVKy^|e2bus$clx6bf()8b#rb5m`1QG zX!RSeclgngWA+gjY9NF^Vt17w88-|T568xlP59n7|9pu|aIsjj++x+AE}3E`@UQ8r zDWrW4u4W%~`jb}SLn*qx!XjybwXi0Q;_?-%g7|XN&tcodd5qFnnfyw|P7c5&bu=I@ z!e+A}aS2cu zhCnaGIqIc5rw{@kWpwVy1AQTOKpsItx)5p-7R^XU;B&dfg5o(k%4z>A@<3QnF5!gR z6g0^33Px%YG2?5}DQ0uELP~x(U{s>^usE+ zSHx=^ub}ipX(ic8y0b!5b6{sD_BP zRp@>ww#)@=+Jr;^3!!`?t!~5@v_N?1O=SE^Y~Es25({wM0S-pGRH$SDF~nC5yqBX% z)!GgayUo5A$E-~yAQH??(g8*Ffj}rwE0oCRjUkqoW9p>z_|H!GCT@*vHYE6h2Ejo% zZg zEHR4OiA(7T$3H8G!z9n@&8=H=!}UPFAq4o->;WkNS2TdY43f(RV+y!6^dX!c-9C&1 z#wDCHVvvyz$uE+p&NiqmZYtHlH_>pQeGSS0DY>o7U4O&6wfUa4HgMOFIH2T~f>7JO z@{6e%151EQmVmMG==BAo7%4Q8a9SliF~3wi5fSCs#JH{mFyYMssU15QA&v9-J9gj* zHWl<9TtLBJw1qG+i@(EF`N~pq=$~J3(gLBLI8BEI<&h+o#UCoID1~EyP)+1G3R(R1 zJY&apSW~PjJ9hMMLa-=~uOdZ!8~7JSNZh-^sQyAfn}Hp}yc59zKHdp~vx0T=U{y#} z5b4lQX#jt>R#mdVSGXg`a2yU_I300!Vqa#LNGa$cn1%>z{^_b!*+svBvuM|sZYhis zB1OhhdmA(UbP7r?Ib zT#(ORigK6>0Fy;nQfCMMaL|SATlx|GQy5f`jZ`-y(L6o)rpH#e2>zt8RLIbVWv>Y6 zFnkFacD7bmvPyikCz!AeQLCKa9g8&)A6VokK^2o%twqH6)S*S6r6d?^8<`~Y;SUvo^RX{(i z)2wtd2eJrf3=^2b26(Asu&DzW1GgAz19q}%U=rDmoWCV-lmnZf`bygEazsuM0Oh!- zpPmwM+Pu`922Y`G2Nn7S2L=6!@eHah3$9ag$#(St-+F*4eS&YqEp8Bzub1uEfuA4- zw3BC-?AQUeIShxl9YgQUE=CeKJOm2!+|k&li;Nr@n05zRm>*D=?WKgCY_i_1BNz{2 zk$2?R-msc6$v6Ur1eLtx8?GO~DJCu-(ko-Q=z}S^`e|Sb?5jj6{lPCo!-ee_AWm3| z%~2qWVRx}b)>2{*$&8a(r#;HI$# z4PYox#i?AZJ%DY~^sy;b!qx>;!9@&V0x%O##V~8N8epz9IaosExd0j?zVe2vugPHa|Ckte^A!k(B3O9^PL2L!m1Hkk4u@YwvNjg{%oe^{~hW&x) zcJdEnL9<=F7KV7sDilTnKnX_Mc(IsS1-xLtVlvi9utK6S^P{p=WI?1YIn_c*K8x*G z2h;@cpwxgaybE|@3eGgau46b1L}J@vIHLo!Pmo|DvXlJXsAHEXKG}`Bc`?`0j;(fO zm(TFD889p&RIk7F*qgWo6n&&G;wt)z6k`7Y2Ehr1bCL?;9~_rrQ>6gX>8#&M#2tdYS`8SU3QME^LTJw5r-p@j#x|GEHP36 zDn+;|3>|v;`;-lWf8ie%3JqB<>f<#sZUU-X2;mrk2FG*?ZeX*c)v=0WD?SA1F9czM z=!V^h^Ccp)5Cu4J;8opN6Yv&3KHWc@WddCq1TTa#V?;xO&@n1Qb)UA0%arg#&j7a) z#1M}8f-)};BlW_mB>RHEB5hJb z1zRTm^~#z@m=pmk?t;khEKv_fq9G|FTAWs zB1Z&evrFQw<$5o5qDGb7^)1S;%XXqCXw=%@rl_!5MdudX20l~ffO)c-N4UfWLF3%g z{JKpL9B|nnL>-9ur3yS3ahWFPZg~m&k7LHQgj)43?qz;&qr8QV(gM7Yaw4%ff@7~Z zpCcx)ESxaI!$9#Z*yiEG+G4&HSOa7svqx+&m>7U#6Ju9Nlgcgit$}^;($s~jiG$Z~ z#erwJM8~zel~BN6%!oRS2>mvkB5*b|#pz#m0GMDdyzxML@HV@)l+n55UzrE0=3iODnY2-QrFm~0h%#bEDATTC zI6cUDxxZLhJ|hs#pHXQ5^d!2Vws}QrCy<87Q-X|88YHVQ91<%Y?*JHqfr|Puto3pZ zK<NbG0WM6D6z}~_8-?yvL} z$VkAkIeadP&{bbjW2I?JkGRHq)^no5Wg36^0qyGlQ`q99M)I zM_!dX`iPLdlxASL**bs&w*j{pcKXfiEOD_zRNYX4*zE5h9C}XJ%d$P~T36kkI}?gY zpLJWilUv(C+27lZW#j~bJtPn>QdV8p3;Cb!Q6VkRxF4Ih97g21!B(h zFpLRX=k~|8f?#W#N`uD{+=keDyu1K^gnGDy*UZ*ZILTTXosrt-PL)h1 
z@+6Q%;eP(&;)^aCr6L9*5D>ei-#0P-O&5GI#T(9KgjLq?8}l;woJSshIl_jr+e=7nTKZEwWn!h(lS z2IIy<;|n7G1}xE8s#BtaLj_SKT!?2xEl2P|U`pjQXHS%g9Oj&<-r8j`*N};AhRXS7 zcZkbw!L_>#Hl1a%$*Yy*63+%wC1+8;6NSh7J5Qw!pR5IhN-BqJ<1h_myP`_5Qw9Zg ztVza+K4)(WBOu$CH#kzYM9v=a#N0G!c!nf3Np9`SW4)7MFU$a#reKbUakE5)yLiXC z;#mWmtK9q>aVEvMTuA0u-A|jF!~s@M?h@E=E)482ZAt`{%E&&N8iR;oYlI0?a&sFE z!PRVa#_Jg#BI-BRx)bIXy`mPa9`GbgbZlCVVWn4oc@TPorbo7L?a(<*1W znGh?DEe=w=Q`Hm@B{M^atw1Q~td2LvI%a7H2HvD(nNAE{m|J8E&Kk*t&4ibFi$FSK z^ydMX1bUljvfV$rPsA~CWaxbF==B95M! zuSM7+Nq0P*_lmrn)Xn^ zv)C5t3Q=u+LcT)vgy9(*=u>=R2Kt2sP+$|8tK2^t|I%MSnW?&aiA=e7 z&K2`!!>!F5%KQkRSwqnoKzPPfETn>;Ea2&ZOz^(TTFxR2f}oA$O&SfS=*!3Mq z8ep=W7O^h8prMClHoFYb@tI005ehY%V+x0oG(eEP6)u+KsS86fo91G*X(kFpfp}}d z10c^;x$sWrKQjTyNd=QB+h7<}I@KjGi+C0`O$Qw$+I&KRqP{{B5RjN;#;{1$JCscY zs*;}(gj&s*bvc_4H(}%WjzDU4eFnp=+t5N&oUUSFwX+82z<1REjRQ5!cYPBL16Hp(nYfj>b6&Lw4BvN)q5hV^#@oU06|L!V(B* zCrzTG7KUk&b*Q48y~-^q7Z9K@NYbZbVPX>d&+R}flA_Y2AB^ByO5zh zrAMORSCKRF0;)tz4_b>WIGEkEca%6hxr5{KeEiaY=oWfq=XCA}AnqiNU5ppO=@H+X zIcqB&F#$G^d_Z<)NeT#JP-bemND2WNEMYZpXAl18-8J^MGFFuL<>T7b5^v1HO$vwmFYyaN~0GCCxbiU(BSYm!vBQK14y8+P~MNte(>gB#|E z6RVc>4E2f9EXND3M-1?lI36^x1#9Scj4L#7jYf$NGP90frE{rYgwMD{M-isydPbS9 zeeH|x$J}tWpj{=5BN(KLR6&>7pvw?Aa=qCOuAVi&(@iP`R}3y|kr}bmV19V29N@N` z$fP>_9zyeucF>#6UI#oT&EzeW^>7R($MFZ3Mus~g%L`xl*Sjh>%G?WQU4#r2{Cfdq z9ivT#TS2QrN2<~K#6IFpH9xos_bw_mS({yQrCeYcfP4B&TEw%bL^e?1pt=Nm74_0W zKT;K32Lzy0+16hfl=prdq7ZQ5vCD-3(Z+Jh2m;7-W)quYtjOXUco}C6_goOE<(giuL3ZwH<+Xza+B3X<- ziESH{0_zePeR;dG!FJJ~d5s${$i(&Al6*zumrG+>2*in67nS!ONEStkNU(b07SDlN zwpx?Bl#tI@s~$Wg;ci!2Ebq;5%=t~lto-ftM|O!H&7_nTj~$Zi29*8c7uTgstBkvY zYH|K5C_mvVz2b<&N0WN-Kc0}l-3&l?OlxMf@Z2w7Y}pi_3*Y*f%$Oz;u#0e^0^Jh< zZ!Q-%t;upsXK<@zPH>3fqAIx-J*-Kb)GEfK1a^+}%he!Hz2Q2mEQAh16KC~>!0R=% z%O%cjemL4~@gP^Wr#HT!L_92KF7QCqEhqB*W#<}2G_8|@Pb^df4K4zNun&r{EfL;8 zIXr6?53NPbDJM2mDTz>wJ--?!aH9-m>iQ8!cA#ON)VzHlPj2^v6I%9y+uYS`*H{@> zhZjW1Bg?O+=n}R)^=vvNE*Q^hSMVSKh>_f+BDYckgYZO(F7LEfBOq10YrtJ%j>}p7 zTX3eerqK&o?DmLhtZQd~O~*U+q+yQaqYieGp}-;LMgTA-CPRX7m`1yNqS)OViJQ_% zwv*cw9Mndq1>0D(sX=Ax5namrNFBn@U@c>gk6;x*$&@8P7oUWBB%Y3*8EX)lNz;`X zM;SBiF0sh-xpJ(U-W#Z90cZfYW#Ob)^?clIs?k$qzq8b6`xV+`a9&2_2eIC(uTg$K z^})Ae^f+~+VS!Jb76*aYg7Z(2!kv_d11N^oEbXvsA)_)#Tu!Kya1vx52x*RXnxu3R z5)R&@5ozG5gE)3Da?-e*2*-2hSEkqgL6x;+=)UGCo>nm%CZty;Nj+%DysJ`D*JwMm4|$nwhNKE)~o zg5zouO~G1C6)3_leh)`OYDHD zs7*i#pOWz)rf55Qo+hRaL7VyLN4}?-x8Ybd$6;_3}5I%;d#>wMD$H9mDi+ z>PzX8Tw`cHcFn?!SO$`ow)^Qb@VvllHb~mj=5!Wh8O@K(wQ%NPPQz_-bAE+PnBKK@ z71b;+x36ix&?p}!h#v)xzZPhBIvT7|(t}rOYr6<@o7W~Z8PRpc!Kpso+B#Jp+1Q5y_VG&6#I6lB z^{8e8F7~9yJt)CLuVyqUS84i0yF+e^cZ_No68pPyHlUlFXFhYxOgtZI)C?m>Mj8$) z!KJOKO{LTYhEyNaW$P8%rpA_m)4ffTCSCxib|z%Wedew!N_vH#HgP)MuW4^85`2}ZsVP6piNOEJVrtxRl_ zn!K^Tg^>9sd&ad49eA>2=B5YRit(SG8+poKb)-qBp1foneQ$MvLtl3^r$&sz>8IjZ z-?D4bZp|7{+=huoRx?+u^Kth*m^nP(rh&C>x9pAnZ8q0DR}SpQR}Ppu@z>4#$RYoz zX)^SEt6LrsN7ESu9ym5|^bsf7>5){ z%CAZPbO5GiP#(=S@iG%Jmiom{|jdcZ?_9en1(K3{5~6XGp$b5O(d zyL@wNT|mbgBRwagmGiF5*k<3$ct@Pp1+>mIL#m9Sd$+xZk3q4S`BUB{M|FkI9mv-0 zuUd6>-pb<6%%gR&U`g2-eq!EK=&X^+V3PDm$r)Z0Ic z<8DFdNR=dyBdj*h#b_=_FFeb^I{KotIq7L03R)ZBlmfq57`Pk%1WdC*na?%1N*jAt zcb8%k5k|3ZRm_XikiWfXzCZ-v1M79d=T#nf519!NG~x;Nm8m&3vFvaQKmp{e%N|w#0|d!-Mp5 z%;`3JSc{%R&=ElXYF@Jp>5xybK_~l*<^^y%AaA}fZEW1`yf)wMIUUF;&pl)Q&U+z! 
zCf=1e*YM&Wpu)l;9L0iNY&%yhnmbcy(jT13w@#^91=`gDjU-3Wb;wJGzyB{sL|>$B#@<+4NN=gi3^y_z2SCX=jKmqEHE!6+tiJkdEzBT|9Wd|LU%8s z-O%yjg7?_^r5s)P>9@_I?S-6Mv9#}*gU`K3=XHAbW_-w?mU-kby|QhGp4>IVXFBaO zJm+RYEFD|IElC_h8+-KlWRlr@H1wnHC}#UMPCquIo-7B^^9&SNq~Y9e zz8Mcv#YdO6%fwF5Y<;+z%lpi^z1?1SG(5V?@PB@fA($)LEdi>{*tTNoNu)jA-iguF zuCerreL9>)*~cEyWIT<+vOqQmUP^a~7shgK%1DdEVBXA^0Lp(zN43}oQxi#L z2cFzdO}&?q-7=0+x7jh$^UJpB5(jq8;#ri_4e6{SiC8@Uv!AIo9FNjZNW%-s+P0VT zBprH_UbfyF%(ZB%(lzmBGbyJTH?wT_O_l8Lkfb(e5*y6+g>*Yr$u+z24k5PR)agcJ z+MYG9l`J0_Gq>v6PTR~5V$^WpBM%^lo67Fm^0pgM@aNUf?m@wm#BBjUCqQDDVSe3t3G%61}@REkKd@`o1B04=2<)Vr+PXxLDr@f z+vCjLQ{Bt-X4q=8S(tHmbYw)jMw@hrIhRo`SErZLtoi)}Mlfz-!M(M;yU|*-yz0pG zyJSYAW^E^SP3(6>^q67#$4KGxLPdhdSOFVxW;gKia)&D*f= z%4ct>6PX3G=|eQ}9XE%K^H^~=aslQpz2&kdZf19yHpmM(xP}nBDYPM&?97FZKGAgr z3LSk=)p|U^J#sl@m3`{kb`Nao5$sKpw7#>~tJyW7ea6ttq4{S=W9M+pCUIbPZo*CO zj&i4K#z$JYUkJ|^$du-IxZSJ7em&=!%VVIEMsBTf)tQSW=|tpC-ennzmUj5`FJ|Zeq`mfnKKL<}m^k?1U!Rb=s=Mv}=iTNY17KqLVtrm-m)_diQ1$`s z4k}*baJMx~M`8a-j)4X#`F`Kh#r6{)_5)|bbhn+|WOnE7>$B^#SFWtj z9$CJe&S;vRGeUZ~(KLeY+_g&C4_%QNsW`@9&JG4S#S5zg9&1n%Wl8 zee}c@k?JU7Pg-E`S5w(WrachV@|EK!&=~~R=wb=`k}Y%2k>NK+sD4&0q$7f~9co&# z+n0|kVZQ5VGox>~Ejq!qGp|Ru-e4M9q!2c})TT=xk7~dCPIf|^-rkkeiYW8Z(UDP8 zp>zg&mqVA1FR6?iGrYon(j^fg-A;nEOVdWMoXhD`Jx2|Xa_NI413`B6!+QIMk6%jn zJS|wdY!#i>p=#BC@yDwU85v!^U|D7Oc)J(h+iMp6?^&>rZaX|-VqYstcYr9N=IM+V zJbvL%8rh<{i!tbwt9fu%sLP!rtfciADXn)&rLZ|rv+kXV?ToqZD@cX0%xJn{a7$ReDl&&Q6Qxe&rMcMe_l%6{ z$Be9?PM_=X+I0AVpIL~S_QD0tl)nUIzFLGS%0Fq3-_DCVaFo~`pUU2VHlKU(u2-hb zJDRrnAA^Y;mq|a$wiqKg{*|qORL86(Kg*P($odLK+fBe+3YC1}#MXIEo21SFwyOy?hRON=zp>ZS!G3M2WD53@@7Hur zb2t6U?fAWCV0`~!{?;$vog7WhuQHhN#%}fE^*OpZ&U|N(H{o0QAWFrE-==zk#xZl-MQuS@vZc;bS6>gBl}#98;;>{Ltc1y|x&dq>LE z{*lRlp{C%*^7b1;zFR|W{p6iBChtOOci2mxq@s43Q;EpkNUh9#aTcNz-7T_LMdr-+ zn9_7OZ-JF>M8**)irsD%baWM zl~b;?f79$;klY?zYqR}kmb%VYjFLXpVA_7_Q{m}t1-YN7BO4nL7xJ$(&*SZ#+~az} zW90R*(GL{2-x9nprAmYC0JC>oyMxr}_YZPfu< zQwO2QEo69%_H#x&Gz!r*X!ed#aqUlJ6RoWN$>@v&HV=x^cg-}~p-G((3AbZ%Pm&`IGkO+erF|pHjC9RU7bJUQ z`vYLI88+OsYqn{Xc;CRCS!!Rm#Ey6NkZFw8WYYv&LhWnB4py)2X}^`*$wxzHjH!4l ztFvV9iOLzXvX$Wx`Mh>yA3bNHP$)XCQbv=2C@6( zRzsUNYoXy2yqs#tB^{V=x7&`wL867-92wg?rwxDfif&n3u*@9&k4^59TPGdpoLIu( zU{*BG^zx2Qr11&VcI~g1(wJG9u%$O`I#5VyI8@4vS5^2`J9=4Vp?$@u^QWvjxl<&#`;a;M;7<#Cwdt-5%4oToKruT zK9kdYpk29=U%hUwm;WNMe36w;#M6zn9UEWAN8UejocY`jRp{=Ok?KksROu|uLI~`y z5X`KOFI_ga`?!ln4>rdNoPofyvd~?sPwqeO+8WT6th`p*iJ7 zcNb9BHvODRPrXv7TUY4XkP1y;Y=#Bl+pAtbYQ?Jh!sCzMbI*XyzAex}+w9!^&&`cZ zK*y8m<0A`3PpH#XP;6?djftAo)~IjR|F0O-7T#V*D^1rmGt+caeRh>;*|vwK#B!Un zy+h(13#nhET!eppoxa~5-WXuIm><8l#||0h3X?IK$BwKTU%AR%rQqF**VnUX>DbBz z;|rIVn@Q-u5%c+1^S-#bn84lmQS9xVOP5{H)hqVCh~sFgTo@fT*JsPM2j&(87r8(O zo|jV-vss(o?yF{Qs4!38+$9T3$0{TH4bwFyMVIsNB1^qsq4_Y}0_#6~g56wc=;5)w z2FCxnx6kMXf;kImy=^Zx_v7We0HA)@vN1YnLDyJ#hBLR1*oTN}XzSbjc)hd2B8E#p zKUJA_*OQk))4%ndb%PfiXhd@wPnbc0)4Q(T%eZ6&>d>yx7bWuHRH@)Z2 zv_iUWDAfY3TO@BLt>i#jZS-g(Iq+D&I?1!ov0sLLA3k_b1<+-PA3d+LjGVrv;VCiD zxd+vGHu9z-XYVLq;^dzcja?h4=r^H2RkI9@xnMuxoY;Hyex$q&G+Dci z^_LzOD&Eo%W)Cfzj@dgl-oSZNgQcs^!mj{2iTl;vyG>@x?IFE&ct`rUXTnz0Nj|>s zgu6+eP)aq$J&s*4x^%&j%ZAZ?sneR%yk@3K4gP4F0n;Ee$K2S~qI-yqc_U+T&0Qt?(otRN4FC>*MC=9gncF84HgWxtozbYK3!gExFZumvi3Q>xQw3^%whH z$VOLHc&?W3doiObT?n>d#iBZSd7X=NU`Xuq9+zcLf6aQsO+IBHcdfGnTBHwuVAj%- zC&L4^WKWyL52VZ_n3OhqA2bhMXqKklEXCVCw( zY3evPjb%cacre&rQ*PNeEi*fUv`AUCVo_ol)53s(N04YB9T_oFhw)^dLH#?NrY5F} z!L9Q=VKw*uCYNHdxtP2X?)P@P`zp^Iv!c2AS4SF&caskmDSNlGNRfsXuF`UDYBWAh zPj09f+tu*aa>4bJ7;?<=#5LV~^ShOE?8D8Dw6*lxNv#Z{i8rPx*+kY-SW2aQQ`U+O zQ+Q*jWP;vZk}E&jCrqt)%o?@*>i5p?+Ro0?h2_KJOGXyC?Yu;1?CsH+h22)PcN8b} 
z&Rn|odJlK9AGrFH`Z?hdWvZE6ZOU#O(STkbqYJag&Bp7ocEMRN`zUl-@2ZuH7K|rn zho%|~d3x2Hut4aJ=-EoqtUDqvX8R?{yFAH!Y(7y!*}ZH$>)_trV7Jd#AhESai$gue zk>yfh-^f{^fLeA(%r2^{Fq)Jlrt*fYg=UmGYe*lhG9MZ+6gk@x<~^P0dFeMD8rYk3 z^BxxmW=d8;c(}`tp1%o}>&Fade0-2b2K$wi0(B_!lyJeg*`)VpdUlYt$7-X)uU

  • ?0hFULogGB#3qoPEMQ2YF5U_Exyn3jP0h1tlD~48=DjP9s?!DQ z?eilK=sPuZ(P?vf(q`w_E+XSAV=rgQNm|1r~Vt%dhcoeo#-#62BLF^Ri zJ7LB59h>}?ha0G}hwp%khAWy*<5;En2(#O)i7CGG;sx}52D=RJgq0ezUY{x$YTW&N1HZ((>Gpw=`%ABk}m%?Z7&Hb9eciv#*X>q4|+Bc8M1-Et(rU^IWDSd#6^KQ`mf766x+k5!q$8K)4*gc$_!yk`#A+Y2>4D>$_TxZy z$1r!-_MV%k)Z8VtNlCWMb#y~k^yxj~`pU?$m|Yi|`4)ZI#61^}86&yx%;ifdx$Dv~ zI=y8dtLy1fK4g^Gws14$@FauvI4yMl*d}$SCR*%|F!wa7MdqfxJ$g*Q%}lc0u-~-i zc<%=F*6k;fvkCGHzp0H(awGOor95J!Lk-6ckB-?RgY<31kx_m~kELMa>0Wr|DC@Cm z*2x&{YMmN%-ThLzKAf8CsX+1yp1g+)Wdhx)wwKYaJK-~%zQJXmh}ccUPE}Fz&Ad(& zfp)!{-zLGU*!Lyf(<~f}~CMyWII_j!Vd@eX>P|d3GQ1k zLu`v@@1@xU&u-GQaBs@pDYu;WW=3)d+0*Kv##h)^ig@{#bg;dB$rCx`fzz{1TQe=R ztkind>>DPY!}nT8pFXQc8PdeIbGr??+plNn3yaOED)a4i;{ytE;w(99<`$T1E+V!x zma`wOCjEJ;=08wPHxfQdp7m~z`#HMXZy(I^lLnd|rfK)Vsi)j48h8Abmgi>mB4_&j zJ${>wTNJ#_TT&%nH0s14k~sU5Y>vq>UrcOwdwmO-`b)O8tQoN1F=9u=+xK;O-s;}N zECOhkYixYM=(u^I#x>4#F?x4RuUzTfLvL#!EC4wM_x)Bss(WjM^29m3tP?|xKQwMC zh~Fuuk<%WyP3;~ARncId6MLI2EG~0CnW5L2jUp`&$7c8Gbu&AOW_y=z>o5JvYQhcD z>#F&_BcE!|S%=ZWqkMXTfgjYc+PR%W;<-<@=g+g~^%m0nLAzwJ_MqnNPNUePX|BfF z?|Ep%pH>0T)cUW_>ZsfR?*(@vAFWuV#v!w|D7SNsW z*0DD!Y+q}?=8~S1j39*ih>z-19?^87Fx*x;%IUqZD%|9ny<>{LH>a2Ta5xbs-NXhH zemL3jvx$iXjwzC8sI}Co{^0K=izz!%^}%YvY!s!sy`8b92RoXv>eN!-8Rosy^u)}) z_LXAv9!SfMoH1A`;_kdGOE?{V4a$$&w`*zzmvmElx5|W$Z_FGsHBHjCH#+U+Zra#mVVyR$Y17;OMwieo z7#r*DGf`=JX|9Tc(<$iIJo|z5;W`y@=~2!O<`KJls!yFo+^4ycE{~DD zH&dvMHJGQ3`Oh=PY!_-hTbX!ArzX?{c6X;tsfv~iFQDzZL>28WM!=6^fIed&ym_Wc z`uZdZft3qJ7tm^jo{?xedZ0PFaK(6-wAU;+%r%9>{6?T{5!T@HgK3DAp7#B0%d&U1 z(2pS*8X^5+8kcE=eKXyu0XZ|ES~tkLaV>?1VNGx9Sv1URe)BnyK6(%(1@Kg`og>*& z6PmE6w+2|9D!3|ch{+zbnxldy#H4j^tj{GY)OARym^@^ z%+^ObkLGIF^;2m(7hoanle@hQO|VKyj?>v4W4(aP-3MXcLNDYAFM zsksy)a}79_K#4t!63)4z8_}2(>(;%yk*-oH#s^7#^wD<@6XT2e(}Y(}l+t?U^uvWt zH6~NetP!@!%%t_|_q?dgYtqZY3AnrDvm;ZWO!L~Qpc~g$(1bxMuJy9CaDbuB=1I3M znpz^WWD*4%F3#Qem?MnTR#QWEgC=H7*W)gTTS#kzT+PyG)?juSowp<(0-5ea`pMiW zbBJo7H*vTp%5D4b#1ocI^)N@Nk7hc>YlZ$i@RQ) zC8&S*hvA0TN_4hqp@+)tx*hUU_QXo2yPjoCpyj7{vH4Wf=G7M+KSJ`>n<37B?JePT;?@h*@Y8fO%UtcIu9!9I=xXPh(G4%TgQqT2RGCAz zsbI$BtI-nCScv9MRE*TSDDYhklF}h)oe2{KY|u_&MvojmYUzq_e>!K=FneSTUpB^? zUJR9U_l?&a^lWjg(TxN1^dCu59T%CAalUXWTZM-byW}bdXm~;~P4(y0;O7(lJ zvkME|evj#jSz0CfM6dxh@jj6}BHXCHB%Tz+QzA?4#6Fl|)F1&?R_-0{?2Sc-ka`Q}$|%+U^)OO*7s->(u($wC4#lsa$vF zhzG+%29wzY+8xhg%)$;U8gErcEg_f)Yj<|(+?fRH5$$8Hs)l5#fc=Rkuq@>gbvl4e;S_b zU5j4K-BUYh4Sa9Bl&w24(`TKhja`D~Izx0cf>Wo`;Q=tWj0W`^eL|CX0W5L{X1>*K z;++j+=lC(6n31g)3>_z5*_5uyw7+fCqLQmMng)!G*Xg{1y+ky%HRTL9F=yu9nsJ+& z+3l)N&&E($sD(yXw+(iTK}xu)3DnuUvPYVZ0U*S8hI17Wlxm&9IA6g z**&Y*I>7YON@lgKj=b{dAu(+#kB`oF!$$OhiXAZ2)NE;4K{O~jomtW^#*P(MyQOvH z0AcLemGfruMti5GEP6{=bg;37rR!IYS#T6D38dY*Zh|RsR!eh@-^| zuyq_=tWLnwc=N%A&Dy*DvWW*s9^IpZlB>yJiCJ6P>9lS(dvBb5p>-to^|_pbOL(}t z;grz3Png;d7S0{rda@4?$Xq;-*fx;dqwLLm-XtxEK(fm=a_aGIi8g#Y-?ZMqlIb3p zJv-{tXN~FBqr-9b@$6vvD<0UTPTGptX|AZtU}rv@W12wgOl@6cPn#ytVRfsW`d`*W zJbUbHU~D}in0$C8rk;Qm{73c;Pi|u2+j>6v9;lcHxHRYSw{Rxrwd2df)b>k*t@7v? zY&R80=#9@&GXHY=`hgzYY1n=3rDSsKiQi+%Tu|fv0G>*vG|sV@kxrw#*T#Su?XZtN zzN-r|I3(AiRx3k#ENHOt?HyNd0n|V(mr?=82ij--yfeEZ9LX4bCDBLrt8KYT_+2geriY!F!7Ma zwb-Ej=s^gKmu!&+>!ax2YwBchpV{1Z`yRc~BZ6+v!JS*=S7M!NX0s=;*VDSeB9D3V zUaHrrEeQA2q*05APpT+I!+9gVJKMXP9tJ7fy`4Po$~Pf*-#DV1dra3~MK77suzA$@ z65E-A{CXR|#!m&wII}9b7!MlI8aEmLz>=9snQ1fbuTET6L#OEIZVAk(L()cv;j$HV zw_%y2x8x1E~-1qSQgHQl96U&8kd8lQwd>1@~>2ma)bZ-qQ8g$pRv8)wVjW1osM%tAG%dJ~q zIWpE)F#jr;ua)}Oh3qy*zHN5s((xs8SK1tDTeP?Cw@^O=!veM?X+D~sAY~sT( zwWVWA>GJBC$C{9?QkhvbpB2_rg#-h~zP(! 
zEv%K+7u?^kIGj|d0zRxFMiQ)NbO(o%3Kc-4o>V9S-mkF$mAk}0O0b^MPdOwaKoCwU z3;_N{V=ZE8X8XF4Z33rkre2At*@~|MY5Qo&tQF%7WaGZn{uc0Y^nt1_Tr7l#0(T^Fvk;_KD_yC5(Dgs=tqGK6ep`zv*41^jC zK!a&QG!SYq5Na?04KCG$SsGNOz=ME)5>YKQX5cGPz0iMBVIH6wa~_JavuHFgEXusF zDDzO1+caTTQ4V9hH!7Sp1^7HEOBE2Rm}tNcXf!|+-9!W4L6oQhq7)_?@H~wMcruGj zG~jP*G{CzO(Q*{C8^!#)=zlmMbfz4;fk8j1aCi>(Sztd}*arc@NbIwK`=vaCfGDbo z2K;@E1_-@Ov}THJTsUie0h*19W);8y(^sp45j8=w>BV+D;tjqlzn(++ycbm9uVDK2+S3eaBj)^ya2Ky+c#VpX-DJ_bh$1|pqA9O%1wNlcnvPzynG=15 zLlQZG{(oCm<{`=MaAc=d5csR@In_OqYJQk1m_m$lsvTHq>W1H6gMXmDNJkV7C9(1F zkGE=B0C#1Pjfa1r>&{77rS;^h!-#9cK8TfdWbpiiCWx+fQTo-Yi$YeE) zQqg*HD2!49;blTxzMLWSiFgeV{kxRqiOKh=8v z>6a2EST%%E0&9g)f+hcboWcDZT3^k0^5#yhsITTYf95b!ouJkzfr4R_5@J6o#O43> z^odkwNEB4(UzkOSiy;1oF zL(}$v_t_%5?e*;0Z-4CN(4+%CLSfEZzKzRBGTQ$=|~~KjX4<&|A13PR0Vv4h-!e{BB}yHg~(I_8mt!$Dt~2&JZOUv z+)5PmWDxzA5S2orQb-gyBp5)6s=sBF=PC>OI?^4L-D?r$6p0cj zcmx;c1P)Dg06(O%pkpK5YB5J9wMHoco+YWu7csm?^f}Fw-RFPdt5Irjmqyjiuhz<{lf)x#jUJ#-Z;MSsK(276GKbUM|h8L(v&FauGob#AyQ2sB5e^(I* zZW5yMYv(&XU(aErrTvkM@PZVj41zx^(MOozW7$MYSraHrc>${4H9+W3v0%&>MS~iQ ziI8FezD1O(!I%gs7T`RM71~$K0`t&30x)5z8C=%4OPOnXGTcu^u*}C*1o$Zxox|_~ z5tSceh>(;3d_-gIz@2U<4kJ^Z{kg1K<}zvyr6S7!CJKmB?62AREe~~rRzgI`26iP8 z3Vfy#g?a}A^BXvJ!oI^Sf>Eio;7%CRR}us$52ZqpKu98Rf-qu9*a(_Yl+Ut88>DEp zUok{&waCyAt`YN!*wb; zWEBniZ>IL~~=_Rg`zj23{oL#^X zF3q?!-s&~nf$x?+U45Rr-JZnC)kfup_F9uNFVo!R&t%8ThZR{k5<2x zbwS6nLcdC_5DgxgTOl++_EvZ)tM_^?lohVze;l6_Ucmo&GAZ1ejSGillj9$7NmjUs zV}67~D@5ZvmFva|(RXf63a7At-(37W8KmI-MXa`d_1>Y?)~}9OZT;$q)z+_$SZ)34 zh}G7wj#zE|>WJ0WuZ~!4{pyI-)~}9OZT;$q)z+_$SZ)34h}G6-TJ5&I`mH@_Fh0h! z8Hs$0k;u0fiD0ecW%#O{$`H+tV!?cuX{^V*c1W>&gOJEK2Z?-RkjOU$iF`wl$ZIG*_BEz;XDp0;j95Cn!_Q9u07O=uIG?M1@G^VtjzPO)We+3tBWo*rOKDx zd${cH%eKLr4se|Jb4c6|ALK-jaY&*MbJ<}Y64eiOu|CWpiJoHkoQhx}6oFzzRDY6< z_V>_u2dMGRExQ~)&mnOYyuSyz6#pZosg85(b&{&;B`Q4sUPrvqfi9$G`3KJSdC5LV zbqSm2S~b&NTtUZZMg4+{bhj3*k2%pks2o>SUC!U(Fj6#Rlc<$>hqzQrIHXi)BG80H z4{**2<9?1cxI{`<-iXb!nV6{@u{RnLI#)!`NcAFT6r56_?1{cn@cvNHjkVvQd!zpm zQ6#$v_jNUAsT(yT;E8 z-P4TsyK)@CVQvF#V2ATi1INRh&nx#0TuPLhvfF}N(^lG^eFK1Cs=9&mmtB2tuy-lIIN zIF7?e#axz4gKgPY!gmr;J`EmV$M~Q+NWUfC84jFha(1_wozDDcWqKzj6D|lqaNF5;p0Y+Dfg2@t6 zlL!Qfq2AXB5$ z-_~r*W_r`EystHh(7T~CiF}Pov=`?N*Fz#GgpN-lUo8@0#`X(oCqurTli6ypFVzYb zC1`jg3c1T@dzc+kET22yD7t~$_p3$8a+Tp>D)PCZFM2g)za_^;Y7&89IGYHwMU8AvOxcI+qnP90BtzfuHMHAybi3zZ{C7WPq4U8d5D{h^@5mna z^AYXF1eiz=H|7%9vLKOf0Q`F|2DCB+{HBQJ1Kud2D&VG*oGfyXWjBufbxAc8rV5p+3n15Kt+H`b=D`=D2YR$LS#C)37AN?Hb=TrB;7N?lanyN zfk?W?bEF%Lr2Auzbn_zV9t_g;Z0zi_h(p@>1k`t%>wx!YtWzetglBU&sZjq7!&hzM zej?F%+)uCNkVL;^xNK9zssWCQsJ@cl<@pbJr>FKTL%jS#s;=etTGn$&qBph)0ddZ>CmRJ|UmUJq5TXQ_^grg`^Q)||-phVoYE zt8ayV0j$t3f))COutL8WR_GVR3jLy3VUEIFvF4PpFjuUJ6y}OGk-}WDCQ_Iy)T(%oS@Qg}EZ|$0MHJwDf@1Pl>A-g3d$I(uaJF&B}%w|K`&8hwwU$_J<)9 zU}NzMLs0)L6@7-`H&lf3Y{y-Na@;ZKHA8-FZT&JQ*`32lg~5{;o+_dW+yWXX#rht5 z2zX% zRdfMEeGhIB28XXEdU%^>#H541vG!z(Y@^;0mPt5Fj7QtZxKzX2t}m(xZ@d1Rn4yNA z{xT_G4P{PvS`0pDVdM&71`I|df*DYD5`|^1JC5Y|Dvu|l*M*vATe3t6FWBP;aBcq_yxzgXsXq>^8K5xp;3QwjCHXiY@#i`GQ+ zzGzKE?~B$%^uB0KMDL5%MD)HWkKUK884C5jB&PQzF}*K|>3vB|?@MBOUlP;%l1#nl z$~10hHf#L?hvcNf?lR;Xm14o9UlHFj^bLjwOAi`C51J=EXb3&%Y89ae{aD>~*n@`9 zbKtf~iLeI^p}WBQQY^pOt>iX2&Y@Wc0pF&w?yPiQC$1^7{wb@{m?-5;|p5k?6t5k?6tfsDCz7EDzEck_=V14MKQ-@qtR zRfwZe0tLe;nF=aPP*ovIP*sS+F1|pQen5naMZ z1pb@Kf?^`wtsEy(n~3s&L>hDCzL`U#Bk)^Q)&;zy%BBPF zq_S?{omDmi`0Xm|0p3+*vw(*$a5~Yp-P&HNz0}|KoM1hNwCMvlYh}NUO51PK|INWZ z4oL)fzKF_zKM_&=jxm>T?znIP&J$4)aH)tYfa4;n10tloZ@^cs68aq&Rz*|?L`Vq% zmrAS(;7KB?1FjQMy8WEL0%J35+ zsso-YqH^DHF2{p8q#XZpyc7MMLlSLyf)l-kLlX5fJV-@L8IGywY=)Pq=qC*CP|<@7 zyH0f35dD=5?^n^;A9BC`o&_d`3klpW^uvz+3Jy&y 
zhQC$O%u`*gIUG`~4Igu&$2p|t3pnR=_Y=js=^O`t%wgoo(1qN`OWd((@ps=`{NqE? z+k+wK%be~)P51a*{Nv*6bXRb?->?&){GOSMe>}%Ry78*u{e6qmU90)6E#e>Bi*pgX ze@l#9-kA;3D~2h1!?1TTC;VyT`50U$LVAZ9@JF02 zwJLbi#Zr_C;8GD40gqP^;93!t06!|CBH*V)R02duMhE<&#(EBL)HNk~C^w1WCL$Vu zOGiir0z};s{Q~ye@Z%H<5Kfzn1Bj4BALN9e6VM1^{&kEMf>mNVW&O zPDe#R9ViDdP$Hy+fER{iC&o}x6;OxMB8F3h6bn#C*dj()gcJ)<2i+nDU4#@1@G2RH zi-0;b4`66UNC^RVm!Y``r~~)_25^Lw5b)QU5KxEt0Sxm9DIwr#r-+vWJYPfum<%AK zgn&9fC}MtqkYWMql%W`z3>3USOg=vs;;$?-!B&s9+`H_7*+X%=ee0Vk>mJz$cw(i*12brp>SZMlYK zt&`>&Cd&_N!Z-6|`K>Z(u7&gH+I;@?NR9QekGQg(#UZtciy2;~qJ2N=V$J7}VqL`W zD=NC0;dLr{mf;I30-SQ1<0P&fcwRqK=J&O=Jk>l+rm?ki8D6NOyBXfAqRl?;Xs2;V z^nYgfl!`9?q>FVKhZO7I);ZA$9Fpk5vz+Ks4oURaPdU*G9Fl0yvz_SO9Fk}Q!+TWp z2*W2;^p0~}!d*C|ghLD$tLPi&x>(=gkYb(pStt5Dha@U^fAB8ni+34 z&hdxfGj)z1JdrqpIk6yV;9kEWo@@xdG?6_fmvBt<^aH*Lj)_EAZ)(>WLf6qwF%+&O zhr%8(gq5T&8;8PWV^H92$DFXflndS;I8RgVGg;rW zRB!0pr+Uv}ZsZZs8=My__eYrXW6FIpbHeUKG?TfJM}!-kPbl{=dyiG(Mapkvzp$(Alovt>Tb~PGxwyihMJHbCGgi!=uXgWOONm;A$oMF2kEuQ4S@+iG482KH`5K=5-*lrC7)r*+%rf znDzah>W!t@7TUMAVmL=dpJRxg7~ziCJK|lDGuHp{(qWwlO(NMH##;kpvRhEznWrCE zh)^o5%J0>gNscN2C$H^nj&bgApCIrQpXR-hZ_0K_ z`D)%(ctCa+%KLEB*;lp_0-}hL;KT-JT*+g2^gT~UVeB8wkr`}|-2tcc+ge%3A#v}+ z1Rx|4I6tf0qs)0+Hg}{b$mV3tF4DuGFY+J_kj<&t*_kjT&O zVb)i+j>)*(MX_~EsQ@pQ-dl}m5U!en?68}MJZNb!^vl|J7~ZTRzYXTo8#!aZB_;1+2lRkAqMFZ*4cNr4 zD;6jlYpQ7s;p~WeTh6~y8-^evm>zbIaKnEiHAQ)Z ziBRfDs(ht=l+C7lrZw!D&XEnIpq8+Yo!Gp7h+}uj+$TttPa|+*H=K%w4RPd=ue9L| zwRSlx2S-apyYL!omaMh%=pEt4emn??8=P3;MfAqJ5SmexudpUqPLk*-UYahGC2F7~ z){IC^ss6%5-I@vnt5oFozzER{jtw=48w6NSlgKB6{!oLW{E>;Ub|=vhynG*)Wqd${ z1v*kws&{g1q$UvvR%H_*TcoB`XK?JBHPufTZcve5v`DSp!?Cf~O5Crwpx-9M-ZqJR zqMdnj?meh`l2-MdXZZi3pQ)WEB!d5VLxg$h)f|+=1IqMwP zNf&S!NtLf?;KXxKBEq9kdFS z1cKX@=mV??9<5R;Ji)~Qg9s4_9#W#2Oawwo1x^q~i1PI{WCQaN5iE#oC(MoG6ZdG9 zY{wSdG24>&aKWKjytEH7CkTz+%!vw&==~$+hLI@>ESRX03z-Oq0w~t@JaNJU@CY}y z{n0il${c=zjwk7nRIlYOx)Xp1o~9Gdb0zieYy z7?K6$>*C*Dt8u0PZ~U?b>G-#497a^?3Zew86{MOaL_w7t!9OkG&{zO?t4*>?(#^kZr*WnLe^zCI<&bVijzbmzJY7U( zz>kZl3JCIu1uo*)U*yn~1$amHS+TZ*bYIon0H<>@%56)AV2UWv!%^O&nN0!yt;)Ln zW{7liISv^G@JJC20UjlyDxhB3G6Z;{#sY*2kxLGsK}k|o00%`>23#Ye3ZQZaJMti! 
zOW#Sv&DM>*ZVZaiC%X9ie&h(0Kd0T2j_>lWVJ~$45 zIUVGyIVn=?W4IHFf&VvAhu6pq~Hnu0&O{`|2wC);8WT7LU%pH19qg?7k7K zDM??vR@qpctO|LJlTJCOmnZVOfPZs9FSOCKMH+x*=fcnY`|@h0+0*+UwB~)R&dk%4 zv*05D^B^L8gs0Kn{|W?6ghT0V7XIWluN3{Ct#`4r~cdVbk zsGX5}KFhBTHP9Fp4-<@ zT|bEWiB)<!Eoc`lOnqqh~&OqtmmTcZ>o~8^S-NL&1hWY|VYWmxgTrdK7EX z!1n=V$jVuX%P8dkWGp|O%o&wvC|dsZwAt-W^VhAI@pUjA zxxU%OU(v~0(!KoeXY}EP18%bUyOBt5B8BeyGAm}gcgUU+<$S_Z&f&79&Y7~Mp8p;} zUv^wnXF+Gmx_9#L8>6+>2vd`r54fUvT+^3r>r-=STyo7I?=asyHq)cFz6TfZxDUIU z>>^LA0ynbz(O8juxgrncZ;Q0-`E<}0tl+871 z52L*{e*%e{K8?xl{qF|$X?j}DQkgn&l-FruZe2kiwrr&rG^f);0n&9Z1UoNf^PQi- zy>rl^pl?+W<$F6_7J4}*pvMvP4buE}P_g^41AX9u_pWBRb>D(U&ENM#BGYd3j!Sjd zZHkX}9=*-^nFXR7h4_JY_Uxj!uZbB-i;{rlU+xk&`;s`#|JiRytF5l5kL=~H{^%qp z;KP;fg&eFM^1cYnw|c;yIWr~(jd8h~ez)DE6X5y3$#&Cx$4(!=sWSt`&bXnGx=+D|8_=3U~`z-_k-)84P9$9ePb+#>Yt5lO$E+g{=vt$ISYEf zF!b40rY!_`m@&&&lGN-odo4O-)Av7e;N`sjVdb?un)xWwk}mfdHoAOf!Rs%T+}&gD z3A6hc8Vw=zjnwop`J4v`bV5d7mCbt>{(kgEH+51*>5VV>Fj5tLJGdJvO`|buBlp3( z=F0g}yS~k_TYBOtC!IRF$`5?IOP<>24Eew6i9u#4b1zoWYQPni|DzK5qlFHsc&(St zRmu~|hI9+PU7zHy5>GWz`!&Z1zsWR(V0n6?!|?ffq?^AFaGgFo3o+YO8{@hS^0x|K zU!e{uhuXd;59Wx~ed3c3_RaAsy$wezlSpG8)@v%?LJ_W(iNHwpL=!AfVkiaiy>(i2-x}tprgG(^%jRfhFZUn2MZxy09 zg964ddSf);yU2W>aLe4~?SXt&!}X%_+}C<_05C&!lxKm|1fqVXi@)Q{{RxT`{Urwa zTx;%!xyCsnR?>3QI4E}~oJMHWeJL>m> z#)4MZJg4yKy$nwCy;}FDQE~^6+)Qv2@NWzFA(8udB<&XLgzC6?gM_O)I+Mx&k`9*6 z+c)<98YsPY67p4<6hO!UhVSJzv;PV`oT0;kT78z~p2YD961|yD-foSk&MO#`5*QR9Ze!XVp{&GGhT}FZ-e`C>L@SJa$)9wP%$>b z6}|5`R|6YXk34$Bm;e?JjadP{3zNef>Xhu#GrH==n-{$~}dqmtITCX4M6JwR64~-K0^d#WX$BnapAIif$7fz~vQ< zuCsl8gFfVf!8iPgm3BHDt@*lHc~DuqhaU2k@!SyeqM+A)gO5RS3(@x!C3yVhb7#B> z>D#u^L=*AxntxlR%cs1T%R`SgeIuGoZ8sH EA0OQsmH+?% literal 0 HcmV?d00001 diff --git a/vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld b/vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld new file mode 100644 index 00000000..2b3556f6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld @@ -0,0 +1,79 @@ +/*======================================================================*/ +/* pmp testlinker script */ +/*======================================================================*/ + +MEMORY +{ + /* + * default location + * NAPOT is used. Thus size must be 2^n. + * For easier handling, esp. when MML set, trap handler has separated stack at GP + * - Currently allocate 0x140000. Just avoid overlap with _end. + */ + M_MEM (AX) : ORIGIN = 0x100000, LENGTH = 1M + + RESERVED : ORIGIN = 0x000000, LENGTH = 1M + + TEST_MEM (AX) : ORIGIN = 0x200000, LENGTH = 256K + + U_MEM (AX) : ORIGIN = 0x240000, LENGTH = 64K +} + +OUTPUT_ARCH( "riscv" ) +ENTRY(_start) + +/*----------------------------------------------------------------------*/ +/* Sections */ +/*----------------------------------------------------------------------*/ + +SECTIONS +{ + . = 0x100000; + __global_pointer$ = 0x140000; + + .text : ALIGN(256) { + *(.text.init) + *(.text) *(.text.*) *(.device_code) . = ALIGN(256); . = 0x10000; } + PROVIDE (__TEXT_END = .); + + .tohost : { *(.tohost) } + .rodata : ALIGN(256) { *(.rodata) *(.rodata.*) } + .sdata : ALIGN(256) { + *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2) *(.srodata*) + *(.sdata .sdata.* .gnu.linkonce.s.*) + } + + /* data sections, mostly for host to deal with input and output data */ + .data : {LONG(0xdeadbeef) *(.data) } + + /* bss segment */ + .sbss : { + *(.sbss .sbss.* .gnu.linkonce.sb.*) + *(.scommon) + } + .bss : { *(.bss) } + + /* + * thread-local data segment. + * Copied to TCM at start by init_tls(). 
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc b/vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc
new file mode 100644
index 00000000..dcb42422
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc
@@ -0,0 +1,3409 @@
+MIME-Version: 1.0
+Content-Type: multipart/related; boundary="----=_NextPart_01D7437B.526C0BD0"
+
+This document is a Single File Web Page, also known as a Web Archive file. If you are seeing this message, your browser or editor doesn't support Web Archive files. Please download a browser that supports Web Archive.
+
+------=_NextPart_01D7437B.526C0BD0
+Content-Location: file:///C:/2AEBA2D4/pmp+enhancement+-+spike,+sail+and+unit+test.htm
+Content-Transfer-Encoding: base64
+Content-Type: text/html; charset="unicode"
+
[base64 payload omitted: ~3,400 lines encoding the UTF-16 HTML export of the Word document "pmp enhancement - spike, sail and unit test" (Office document properties and Word latent-style definitions)]
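Returning to mseccfg_test.ld above: the .tdata comment states the initialized TLS image is copied to TCM at startup by init_tls(). A minimal sketch of such a routine against the _tdata_begin/_tdata_end symbols the script defines (the destination argument and this implementation are assumptions, not the patch's actual code):

/* Hypothetical sketch, not the patch's implementation: copy the .tdata
 * image to its runtime TLS area using the linker-provided symbols. */
#include <stddef.h>
#include <string.h>

extern char _tdata_begin[], _tdata_end[];

void init_tls(void *tls_dest)
{
    /* length of the initialized thread-local image laid out by .tdata */
    size_t len = (size_t)(_tdata_end - _tdata_begin);
    memcpy(tls_dest, _tdata_begin, len);
}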
+ADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMA +ZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADgAIgAgAE4AYQBtAGUAPQAiAE0AZQBkAGkAdQBt +ACAARwByAGkAZAAgADIAIABBAGMAYwBlAG4AdAAgADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBp +AG8AcgBpAHQAeQA9ACIANgA5ACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUAbQAgAEcAcgBpAGQA +IAAzACAAQQBjAGMAZQBuAHQAIAA0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBw +AHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkA +PQAiADcAMAAiACAATgBhAG0AZQA9ACIARABhAHIAawAgAEwAaQBzAHQAIABBAGMAYwBlAG4AdAAg +ADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANwAxACIAIABOAGEAbQBl +AD0AIgBDAG8AbABvAHIAZgB1AGwAIABTAGgAYQBkAGkAbgBnACAAQQBjAGMAZQBuAHQAIAA0ACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADcAMgAiACAATgBhAG0AZQA9ACIA +QwBvAGwAbwByAGYAdQBsACAATABpAHMAdAAgAEEAYwBjAGUAbgB0ACAANAAiAC8APgANAAoAIAAg +ADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMA +ZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA3ADMAIgAgAE4AYQBtAGUAPQAiAEMAbwBsAG8AcgBm +AHUAbAAgAEcAcgBpAGQAIABBAGMAYwBlAG4AdAAgADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBp +AG8AcgBpAHQAeQA9ACIANgAwACIAIABOAGEAbQBlAD0AIgBMAGkAZwBoAHQAIABTAGgAYQBkAGkA +bgBnACAAQQBjAGMAZQBuAHQAIAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBw +AHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkA +PQAiADYAMQAiACAATgBhAG0AZQA9ACIATABpAGcAaAB0ACAATABpAHMAdAAgAEEAYwBjAGUAbgB0 +ACAANQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMA +awBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADIAIgAgAE4AYQBt +AGUAPQAiAEwAaQBnAGgAdAAgAEcAcgBpAGQAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgAzACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUA +bQAgAFMAaABhAGQAaQBuAGcAIAAxACAAQQBjAGMAZQBuAHQAIAA1ACIALwA+AA0ACgAgACAAPAB3 +ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIA +IABQAHIAaQBvAHIAaQB0AHkAPQAiADYANAAiACAATgBhAG0AZQA9ACIATQBlAGQAaQB1AG0AIABT +AGgAYQBkAGkAbgBnACAAMgAgAEEAYwBjAGUAbgB0ACAANQAiAC8APgANAAoAIAAgADwAdwA6AEwA +cwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUABy +AGkAbwByAGkAdAB5AD0AIgA2ADUAIgAgAE4AYQBtAGUAPQAiAE0AZQBkAGkAdQBtACAATABpAHMA +dAAgADEAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQA +eQA9ACIANgA2ACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUAbQAgAEwAaQBzAHQAIAAyACAAQQBj +AGMAZQBuAHQAIAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADYANwAi +ACAATgBhAG0AZQA9ACIATQBlAGQAaQB1AG0AIABHAHIAaQBkACAAMQAgAEEAYwBjAGUAbgB0ACAA +NQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBl +AGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADgAIgAgAE4AYQBtAGUA +PQAiAE0AZQBkAGkAdQBtACAARwByAGkAZAAgADIAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgA5ACIAIABOAGEAbQBlAD0AIgBNAGUAZABp +AHUAbQAgAEcAcgBpAGQAIAAzACAAQQBjAGMAZQBuAHQAIAA1ACIALwA+AA0ACgAgACAAPAB3ADoA 
+TABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQ +AHIAaQBvAHIAaQB0AHkAPQAiADcAMAAiACAATgBhAG0AZQA9ACIARABhAHIAawAgAEwAaQBzAHQA +IABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIA +NwAxACIAIABOAGEAbQBlAD0AIgBDAG8AbABvAHIAZgB1AGwAIABTAGgAYQBkAGkAbgBnACAAQQBj +AGMAZQBuAHQAIAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADcAMgAi +ACAATgBhAG0AZQA9ACIAQwBvAGwAbwByAGYAdQBsACAATABpAHMAdAAgAEEAYwBjAGUAbgB0ACAA +NQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBl +AGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA3ADMAIgAgAE4AYQBtAGUA +PQAiAEMAbwBsAG8AcgBmAHUAbAAgAEcAcgBpAGQAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgAwACIAIABOAGEAbQBlAD0AIgBMAGkAZwBo +AHQAIABTAGgAYQBkAGkAbgBnACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+AA0ACgAgACAAPAB3ADoA +TABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQ +AHIAaQBvAHIAaQB0AHkAPQAiADYAMQAiACAATgBhAG0AZQA9ACIATABpAGcAaAB0ACAATABpAHMA +dAAgAEEAYwBjAGUAbgB0ACAANgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0 +AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0A +IgA2ADIAIgAgAE4AYQBtAGUAPQAiAEwAaQBnAGgAdAAgAEcAcgBpAGQAIABBAGMAYwBlAG4AdAAg +ADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgAzACIAIABOAGEAbQBl +AD0AIgBNAGUAZABpAHUAbQAgAFMAaABhAGQAaQBuAGcAIAAxACAAQQBjAGMAZQBuAHQAIAA2ACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADYANAAiACAATgBhAG0AZQA9ACIA +TQBlAGQAaQB1AG0AIABTAGgAYQBkAGkAbgBnACAAMgAgAEEAYwBjAGUAbgB0ACAANgAiAC8APgAN +AAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYA +YQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADUAIgAgAE4AYQBtAGUAPQAiAE0AZQBk +AGkAdQBtACAATABpAHMAdAAgADEAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFAAcgBpAG8AcgBpAHQAeQA9ACIANgA2ACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUAbQAgAEwA +aQBzAHQAIAAyACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIA +aQB0AHkAPQAiADYANwAiACAATgBhAG0AZQA9ACIATQBlAGQAaQB1AG0AIABHAHIAaQBkACAAMQAg +AEEAYwBjAGUAbgB0ACAANgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2 +ADgAIgAgAE4AYQBtAGUAPQAiAE0AZQBkAGkAdQBtACAARwByAGkAZAAgADIAIABBAGMAYwBlAG4A +dAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBj +AGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgA5ACIAIABOAGEA +bQBlAD0AIgBNAGUAZABpAHUAbQAgAEcAcgBpAGQAIAAzACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+ +AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIA +ZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADcAMAAiACAATgBhAG0AZQA9ACIARABh +AHIAawAgAEwAaQBzAHQAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBp +AG8AcgBpAHQAeQA9ACIANwAxACIAIABOAGEAbQBlAD0AIgBDAG8AbABvAHIAZgB1AGwAIABTAGgA +YQBkAGkAbgBnACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 
+AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIA +aQB0AHkAPQAiADcAMgAiACAATgBhAG0AZQA9ACIAQwBvAGwAbwByAGYAdQBsACAATABpAHMAdAAg +AEEAYwBjAGUAbgB0ACAANgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA3 +ADMAIgAgAE4AYQBtAGUAPQAiAEMAbwBsAG8AcgBmAHUAbAAgAEcAcgBpAGQAIABBAGMAYwBlAG4A +dAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBj +AGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIAMQA5ACIAIABRAEYA +bwByAG0AYQB0AD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFMAdQBiAHQAbABl +ACAARQBtAHAAaABhAHMAaQBzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQA +aQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAi +ADIAMQAiACAAUQBGAG8AcgBtAGEAdAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0A +IgBJAG4AdABlAG4AcwBlACAARQBtAHAAaABhAHMAaQBzACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIA +aQBvAHIAaQB0AHkAPQAiADMAMQAiACAAUQBGAG8AcgBtAGEAdAA9ACIAdAByAHUAZQAiAA0ACgAg +ACAAIABOAGEAbQBlAD0AIgBTAHUAYgB0AGwAZQAgAFIAZQBmAGUAcgBlAG4AYwBlACIALwA+AA0A +CgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBh +AGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADMAMgAiACAAUQBGAG8AcgBtAGEAdAA9ACIA +dAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBJAG4AdABlAG4AcwBlACAAUgBlAGYAZQBy +AGUAbgBjAGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIAMwAzACIAIABR +AEYAbwByAG0AYQB0AD0AIgB0AHIAdQBlACIAIABOAGEAbQBlAD0AIgBCAG8AbwBrACAAVABpAHQA +bABlACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADMANwAiACAAUwBlAG0A +aQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABVAG4AaABpAGQAZQBXAGgAZQBu +AFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAgAE4AYQBtAGUAPQAiAEIAaQBiAGwAaQBvAGcAcgBhAHAA +aAB5ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADMAOQAiACAAUwBlAG0A +aQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABVAG4AaABpAGQAZQBXAGgAZQBu +AFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAgAFEARgBvAHIAbQBhAHQAPQAiAHQAcgB1AGUAIgAgAE4A +YQBtAGUAPQAiAFQATwBDACAASABlAGEAZABpAG4AZwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBk +AEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkA +bwByAGkAdAB5AD0AIgA0ADEAIgAgAE4AYQBtAGUAPQAiAFAAbABhAGkAbgAgAFQAYQBiAGwAZQAg +ADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAAyACIAIABOAGEAbQBl +AD0AIgBQAGwAYQBpAG4AIABUAGEAYgBsAGUAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQA +RQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBv +AHIAaQB0AHkAPQAiADQAMwAiACAATgBhAG0AZQA9ACIAUABsAGEAaQBuACAAVABhAGIAbABlACAA +MwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBl +AGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADQAIgAgAE4AYQBtAGUA +PQAiAFAAbABhAGkAbgAgAFQAYQBiAGwAZQAgADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABF +AHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8A +cgBpAHQAeQA9ACIANAA1ACIAIABOAGEAbQBlAD0AIgBQAGwAYQBpAG4AIABUAGEAYgBsAGUAIAA1 +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA +ZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAMAAiACAATgBhAG0AZQA9 +ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgAEwAaQBnAGgAdAAiAC8APgANAAoAIAAgADwAdwA6AEwA 
+cwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUABy +AGkAbwByAGkAdAB5AD0AIgA0ADYAIgAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUA +IAAxACAATABpAGcAaAB0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQA +NwAiACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADIAIgAvAD4ADQAKACAAIAA8 +AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUA +IgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA4ACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABh +AGIAbABlACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAA +TABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADkAIgAg +AE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAA0ACIALwA+AA0ACgAgACAAPAB3ADoA +TABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQ +AHIAaQBvAHIAaQB0AHkAPQAiADUAMAAiACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwA +ZQAgADUAIABEAGEAcgBrACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUA +MQAiACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADYAIABDAG8AbABvAHIAZgB1 +AGwAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAyACIAIABOAGEAbQBl +AD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAiAC8APgANAAoA +IAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBs +AHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIA +RwByAGkAZAAgAFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADEAIgAv +AD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0A +IgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBH +AHIAaQBkACAAVABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAAMQAiAC8APgANAAoAIAAgADwA +dwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAi +ACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEA +YgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIA +aQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADQAIABB +AGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8A +bgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAw +ACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMA +YwBlAG4AdAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIA +DQAKACAAIAAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwBy +AGYAdQBsACAAQQBjAGMAZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMA +ZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0 +AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAA +NwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAAMQAiAC8APgANAAoAIAAgADwAdwA6 +AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAA +UAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIARwByAGkAZAAg +AFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADIAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAA +VABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBk 
+AEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkA +bwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAAz +ACAAQQBjAGMAZQBuAHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQA +aQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAi +ADQAOQAiACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4A +dAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBj +AGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEA +bQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAg +ADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAg +AE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAA +QQBjAGMAZQBuAHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUA +MgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANwAgAEMAbwBs +AG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUA +eABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwBy +AGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwA +ZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADMAIgAvAD4ADQAKACAAIAA8AHcAOgBM +AHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAA +cgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABl +ACAAMgAgAEEAYwBjAGUAbgB0ACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUA +cAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5 +AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMA +ZQBuAHQAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABM +AG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAA +TgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADMAIgAv +AD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0A +IgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBH +AHIAaQBkACAAVABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADMAIgAvAD4A +DQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBm +AGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUA +PQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBu +AHQAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8A +YwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAg +ACAAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUA +bAAgAEEAYwBjAGUAbgB0ACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0 +AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0A +IgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADEAIABM +AGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBp +AHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAAMgAgAEEA +YwBjAGUAbgB0ACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBu +ACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgA +IgAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAA0 +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA 
+ZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9 +ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADQAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAA +VABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADQAIgAvAD4ADQAKACAAIAA8 +AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUA +IgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEcAcgBp +AGQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQAIAA0ACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEA +bQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBj +AGUAbgB0ACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAA +TABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgAN +AAoAIAAgACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQA +IABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIA +NAA3ACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0 +ACAANQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMA +awBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBt +AGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAA1ACIALwA+AA0A +CgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBh +AGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIARwByAGkA +ZAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBM +AHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAA +cgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABl +ACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBp +AG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEA +YgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQAIAA1ACIALwA+AA0ACgAg +ACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwA +cwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBH +AHIAaQBkACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAA +NQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBl +AGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAA +TgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBl +AG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABO +AGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAANgAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEcA +cgBpAGQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+AA0ACgAgACAAPAB3 +ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIA +IABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIARwByAGkAZAAgAFQAYQBi +AGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBp 
+AHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAAVABhAGIAbABlACAANQAgAEQA +YQByAGsAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQA +eQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEcAcgBpAGQAIABUAGEAYgBsAGUAIAA2 +ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+AA0ACgAgACAAPAB3ADoA +TABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQ +AHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBHAHIAaQBkACAA +VABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAANgAiAC8APgAN +AAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYA +YQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgAgAE4AYQBtAGUAPQAiAEwAaQBz +AHQAIABUAGEAYgBsAGUAIAAxACAATABpAGcAaAB0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQA +RQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBv +AHIAaQB0AHkAPQAiADQANwAiACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADIA +IgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBk +AD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA4ACIAIABOAGEAbQBlAD0A +IgBMAGkAcwB0ACAAVABhAGIAbABlACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABj +AGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkA +dAB5AD0AIgA0ADkAIgAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAA0ACIALwA+ +AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIA +ZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMAAiACAATgBhAG0AZQA9ACIATABp +AHMAdAAgAFQAYQBiAGwAZQAgADUAIABEAGEAcgBrACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQA +RQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBv +AHIAaQB0AHkAPQAiADUAMQAiACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADYA +IABDAG8AbABvAHIAZgB1AGwAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIA +NQAyACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBm +AHUAbAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMA +awBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAg +ACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMA +YwBlAG4AdAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIA +IABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAAMQAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAi +AEwAaQBzAHQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAAxACIALwA+AA0ACgAgACAA +PAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBl +ACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQA +YQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABF +AHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8A +cgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANQAg +AEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBp +AHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUA +IAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3 +ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIA 
+IABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0 +ACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAAMQAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0A +ZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAg +ADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBl +AD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAAMgAiAC8APgANAAoA +IAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBs +AHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEwAaQBzAHQA +IABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIA +aQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAg +ADQAIABBAGMAYwBlAG4AdAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAA +dABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9 +ACIANQAwACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANQAgAEQAYQByAGsA +IABBAGMAYwBlAG4AdAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIA +NQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAA2ACAAQwBv +AGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQA +RQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBv +AHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIA +bABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAAMgAiAC8APgANAAoAIAAg +ADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMA +ZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIATABp +AHMAdAAgAFQAYQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADMAIgAvAD4A +DQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBm +AGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBMAGkA +cwB0ACAAVABhAGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAAMwAiAC8APgANAAoAIAAgADwAdwA6 +AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAA +UAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBs +AGUAIAAzACAAQQBjAGMAZQBuAHQAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMA +ZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0 +AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADQAIABBAGMA +YwBlAG4AdAAgADMAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIA +IABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBl +AG4AdAAgADMAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAK +ACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYA +dQBsACAAQQBjAGMAZQBuAHQAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBw +AHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkA +PQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANwAg +AEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwA +cwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUABy 
+AGkAbwByAGkAdAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQA +YQBiAGwAZQAgADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADQAIgAvAD4ADQAKACAAIAA8 +AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUA +IgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABh +AGIAbABlACAAMgAgAEEAYwBjAGUAbgB0ACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUA +eABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwBy +AGkAdAB5AD0AIgA0ADgAIgAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAAzACAA +QQBjAGMAZQBuAHQAIAA0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQA +OQAiACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAg +ADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBl +AD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADQA +IgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBk +AD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4A +YQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBj +AGMAZQBuAHQAIAA0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAi +AA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8A +cgBmAHUAbAAgAEEAYwBjAGUAbgB0ACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABj +AGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkA +dAB5AD0AIgA0ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAg +ADEAIABMAGkAZwBoAHQAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBp +AG8AcgBpAHQAeQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAA +MgAgAEEAYwBjAGUAbgB0ACAANQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0 +AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0A +IgA0ADgAIgAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBu +AHQAIAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8A +YwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBh +AG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4A +DQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBm +AGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBMAGkA +cwB0ACAAVABhAGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADUAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AEwAaQBzAHQAIABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQA +IAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAA +IABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAg +AEEAYwBjAGUAbgB0ACAANQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0 +ADYAIgANAAoAIAAgACAATgBhAG0AZQA9ACIATABpAHMAdAAgAFQAYQBiAGwAZQAgADEAIABMAGkA +ZwBoAHQAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQA 
+eQA9ACIANAA3ACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAAMgAgAEEAYwBj +AGUAbgB0ACAANgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAA +TABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA0ADgAIgAg +AE4AYQBtAGUAPQAiAEwAaQBzAHQAIABUAGEAYgBsAGUAIAAzACAAQQBjAGMAZQBuAHQAIAA2ACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADQAOQAiACAATgBhAG0AZQA9ACIA +TABpAHMAdAAgAFQAYQBiAGwAZQAgADQAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8 +AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUA +IgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANQAwACIAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAVABh +AGIAbABlACAANQAgAEQAYQByAGsAIABBAGMAYwBlAG4AdAAgADYAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFAAcgBpAG8AcgBpAHQAeQA9ACIANQAxACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQA +IABUAGEAYgBsAGUAIAA2ACAAQwBvAGwAbwByAGYAdQBsACAAQQBjAGMAZQBuAHQAIAA2ACIALwA+ +AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIA +ZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADUAMgAiAA0ACgAgACAAIABOAGEAbQBl +AD0AIgBMAGkAcwB0ACAAVABhAGIAbABlACAANwAgAEMAbwBsAG8AcgBmAHUAbAAgAEEAYwBjAGUA +bgB0ACAANgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABv +AGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUA +ZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAg +AE4AYQBtAGUAPQAiAE0AZQBuAHQAaQBvAG4AIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABp +AGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIA +dAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBTAG0AYQByAHQAIABIAHkAcABlAHIAbABp +AG4AawAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMA +awBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAi +ACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4A +YQBtAGUAPQAiAEgAYQBzAGgAdABhAGcAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQA +ZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdABy +AHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBVAG4AcgBlAHMAbwBsAHYAZQBkACAATQBlAG4A +dABpAG8AbgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABv +AGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUA +ZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAg +AE4AYQBtAGUAPQAiAFMAbQBhAHIAdAAgAEwAaQBuAGsAIgAvAD4ADQAKACAAPAAvAHcAOgBMAGEA +dABlAG4AdABTAHQAeQBsAGUAcwA+AA0ACgA8AC8AeABtAGwAPgA8ACEAWwBlAG4AZABpAGYAXQAt +AC0APgANAAoAPABzAHQAeQBsAGUAPgANAAoAPAAhAC0ALQANAAoAQABtAGUAZABpAGEAIABwAHIA +aQBuAHQAIAB7AA0ACgAgACAAIAAgACMAbQBhAGkAbgAgAHsADQAKACAAIAAgACAAIAAgACAAIABw +AGEAZABkAGkAbgBnAC0AYgBvAHQAdABvAG0AOgAgADEAZQBtACAAIQBpAG0AcABvAHIAdABhAG4A +dAA7ACAALwAqACAAVABoAGUAIABkAGUAZgBhAHUAbAB0ACAAcABhAGQAZABpAG4AZwAgAG8AZgAg +ADYAZQBtACAAaQBzACAAdABvAG8AIABtAHUAYwBoACAAZgBvAHIAIABwAHIAaQBuAHQAbwB1AHQA +cwAgACoALwANAAoAIAAgACAAIAB9AA0ACgANAAoAIAAgACAAIABiAG8AZAB5ACAAewANAAoAIAAg +ACAAIAAgACAAIAAgAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6ACAAQQByAGkAYQBsACwAIABIAGUA +bAB2AGUAdABpAGMAYQAsACAARgByAGUAZQBTAGEAbgBzACwAIABzAGEAbgBzAC0AcwBlAHIAaQBm +ADsADQAKACAAIAAgACAAIAAgACAAIABmAG8AbgB0AC0AcwBpAHoAZQA6ACAAMQAwAHAAdAA7AA0A +CgAgACAAIAAgACAAIAAgACAAbABpAG4AZQAtAGgAZQBpAGcAaAB0ADoAIAAxAC4AMgA7AA0ACgAg 
+ACAAIAAgAH0ADQAKAA0ACgAgACAAIAAgAGIAbwBkAHkALAAgACMAZgB1AGwAbAAtAGgAZQBpAGcA +aAB0AC0AYwBvAG4AdABhAGkAbgBlAHIALAAgACMAbQBhAGkAbgAsACAAIwBwAGEAZwBlACwAIAAj +AGMAbwBuAHQAZQBuAHQALAAgAC4AaABhAHMALQBwAGUAcgBzAG8AbgBhAGwALQBzAGkAZABlAGIA +YQByACAAIwBjAG8AbgB0AGUAbgB0ACAAewANAAoAIAAgACAAIAAgACAAIAAgAGIAYQBjAGsAZwBy +AG8AdQBuAGQAOgAgACMAZgBmAGYAIAAhAGkAbQBwAG8AcgB0AGEAbgB0ADsADQAKACAAIAAgACAA +IAAgACAAIABjAG8AbABvAHIAOgAgACMAMAAwADAAIAAhAGkAbQBwAG8AcgB0AGEAbgB0ADsADQAK +ACAAIAAgACAAIAAgACAAIABiAG8AcgBkAGUAcgA6ACAAMAAgACEAaQBtAHAAbwByAHQAYQBuAHQA +OwANAAoAIAAgACAAIAAgACAAIAAgAHcAaQBkAHQAaAA6ACAAMQAwADAAJQAgACEAaQBtAHAAbwBy +AHQAYQBuAHQAOwANAAoAIAAgACAAIAAgACAAIAAgAGgAZQBpAGcAaAB0ADoAIABhAHUAdABvACAA +IQBpAG0AcABvAHIAdABhAG4AdAA7AA0ACgAgACAAIAAgACAAIAAgACAAbQBpAG4ALQBoAGUAaQBn +AGgAdAA6ACAAYQB1AHQAbwAgACEAaQBtAHAAbwByAHQAYQBuAHQAOwANAAoAIAAgACAAIAAgACAA +IAAgAG0AYQByAGcAaQBuADoAIAAwACAAIQBpAG0AcABvAHIAdABhAG4AdAA7AA0ACgAgACAAIAAg +ACAAIAAgACAAcABhAGQAZABpAG4AZwA6ACAAMAAgACEAaQBtAHAAbwByAHQAYQBuAHQAOwANAAoA +IAAgACAAIAAgACAAIAAgAGQAaQBzAHAAbABhAHkAOgAgAGIAbABvAGMAawAgACEAaQBtAHAAbwBy +AHQAYQBuAHQAOwANAAoAIAAgACAAIAB9AA0ACgANAAoAIAAgACAAIABhACwAIABhADoAbABpAG4A +awAsACAAYQA6AHYAaQBzAGkAdABlAGQALAAgAGEAOgBmAG8AYwB1AHMALAAgAGEAOgBoAG8AdgBl +AHIALAAgAGEAOgBhAGMAdABpAHYAZQAgAHsADQAKACAAIAAgACAAIAAgACAAIABjAG8AbABvAHIA +OgAgACMAMAAwADAAOwANAAoAIAAgACAAIAB9AA0ACgANAAoAIAAgACAAIAAjAGMAbwBuAHQAZQBu +AHQAIABoADEALAANAAoAIAAgACAAIAAjAGMAbwBuAHQAZQBuAHQAIABoADIALAANAAoAIAAgACAA +IAAjAGMAbwBuAHQAZQBuAHQAIABoADMALAANAAoAIAAgACAAIAAjAGMAbwBuAHQAZQBuAHQAIABo +ADQALAANAAoAIAAgACAAIAAjAGMAbwBuAHQAZQBuAHQAIABoADUALAANAAoAIAAgACAAIAAjAGMA +bwBuAHQAZQBuAHQAIABoADYAIAB7AA0ACgAgACAAIAAgACAAIAAgACAAZgBvAG4AdAAtAGYAYQBt +AGkAbAB5ADoAIABBAHIAaQBhAGwALAAgAEgAZQBsAHYAZQB0AGkAYwBhACwAIABGAHIAZQBlAFMA +YQBuAHMALAAgAHMAYQBuAHMALQBzAGUAcgBpAGYAOwANAAoAIAAgACAAIAAgACAAIAAgAHAAYQBn +AGUALQBiAHIAZQBhAGsALQBhAGYAdABlAHIAOgAgAGEAdgBvAGkAZAA7AA0ACgAgACAAIAAgAH0A +DQAKAA0ACgAgACAAIAAgAHAAcgBlACAAewANAAoAIAAgACAAIAAgACAAIAAgAGYAbwBuAHQALQBm +AGEAbQBpAGwAeQA6ACAATQBvAG4AYQBjAG8ALAAgACIAQwBvAHUAcgBpAGUAcgAgAE4AZQB3ACIA +LAAgAG0AbwBuAG8AcwBwAGEAYwBlADsADQAKACAAIAAgACAAfQANAAoADQAKACAAIAAgACAAIwBo +AGUAYQBkAGUAcgAsAA0ACgAgACAAIAAgAC4AYQB1AGkALQBoAGUAYQBkAGUAcgAtAGkAbgBuAGUA +cgAsAA0ACgAgACAAIAAgACMAbgBhAHYAaQBnAGEAdABpAG8AbgAsAA0ACgAgACAAIAAgACMAcwBp +AGQAZQBiAGEAcgAsAA0ACgAgACAAIAAgAC4AcwBpAGQAZQBiAGEAcgAsAA0ACgAgACAAIAAgACMA +cABlAHIAcwBvAG4AYQBsAC0AaQBuAGYAbwAtAHMAaQBkAGUAYgBhAHIALAANAAoAIAAgACAAIAAu +AGkAYQAtAGYAaQB4AGUAZAAtAHMAaQBkAGUAYgBhAHIALAANAAoAIAAgACAAIAAuAHAAYQBnAGUA +LQBhAGMAdABpAG8AbgBzACwADQAKACAAIAAgACAALgBuAGEAdgBtAGUAbgB1ACwADQAKACAAIAAg +ACAALgBhAGoAcwAtAG0AZQBuAHUALQBiAGEAcgAsAA0ACgAgACAAIAAgAC4AbgBvAHAAcgBpAG4A +dAAsAA0ACgAgACAAIAAgAC4AaQBuAGwAaQBuAGUALQBjAG8AbgB0AHIAbwBsAC0AbABpAG4AawAs +AA0ACgAgACAAIAAgAC4AaQBuAGwAaQBuAGUALQBjAG8AbgB0AHIAbwBsAC0AbABpAG4AawAgAGEA +LAANAAoAIAAgACAAIABhAC4AcwBoAG8AdwAtAGwAYQBiAGUAbABzAC0AZQBkAGkAdABvAHIALAAN +AAoAIAAgACAAIAAuAGcAbABvAGIAYQBsAC0AYwBvAG0AbQBlAG4AdAAtAGEAYwB0AGkAbwBuAHMA +LAANAAoAIAAgACAAIAAuAGMAbwBtAG0AZQBuAHQALQBhAGMAdABpAG8AbgBzACwADQAKACAAIAAg +ACAALgBxAHUAaQBjAGsALQBjAG8AbQBtAGUAbgB0AC0AYwBvAG4AdABhAGkAbgBlAHIALAANAAoA +IAAgACAAIAAjAGEAZABkAGMAbwBtAG0AZQBuAHQAIAB7AA0ACgAgACAAIAAgACAAIAAgACAAZABp +AHMAcABsAGEAeQA6ACAAbgBvAG4AZQAgACEAaQBtAHAAbwByAHQAYQBuAHQAOwANAAoAIAAgACAA +IAB9AA0ACgANAAoAIAAgACAAIAAvACoAIABDAE8ATgBGAC0AMgA4ADUANAA0ACAAYwBhAG4AbgBv +AHQAIABwAHIAaQBuAHQAIABtAHUAbAB0AGkAcABsAGUAIABwAGEAZwBlAHMAIABpAG4AIABJAEUA 
+IAAqAC8ADQAKACAAIAAgACAAIwBzAHAAbABpAHQAdABlAHIALQBjAG8AbgB0AGUAbgB0ACAAewAN +AAoAIAAgACAAIAAgACAAIAAgAHAAbwBzAGkAdABpAG8AbgA6ACAAcgBlAGwAYQB0AGkAdgBlACAA +IQBpAG0AcABvAHIAdABhAG4AdAA7AA0ACgAgACAAIAAgAH0ADQAKAA0ACgAgACAAIAAgAC4AYwBv +AG0AbQBlAG4AdAAgAC4AZABhAHQAZQA6ADoAYgBlAGYAbwByAGUAIAB7AA0ACgAgACAAIAAgACAA +IAAgACAAYwBvAG4AdABlAG4AdAA6ACAAbgBvAG4AZQAgACEAaQBtAHAAbwByAHQAYQBuAHQAOwAg +AC8AKgAgAHIAZQBtAG8AdgBlACAAbQBpAGQAZABvAHQAIABmAG8AcgAgAHAAcgBpAG4AdAAgAHYA +aQBlAHcAIAAqAC8ADQAKACAAIAAgACAAfQANAAoADQAKACAAIAAgACAAaAAxAC4AcABhAGcAZQB0 +AGkAdABsAGUAIABpAG0AZwAgAHsADQAKACAAIAAgACAAIAAgACAAIABoAGUAaQBnAGgAdAA6ACAA +YQB1AHQAbwA7AA0ACgAgACAAIAAgACAAIAAgACAAdwBpAGQAdABoADoAIABhAHUAdABvADsADQAK +ACAAIAAgACAAfQANAAoADQAKACAAIAAgACAALgBwAHIAaQBuAHQALQBvAG4AbAB5ACAAewANAAoA +IAAgACAAIAAgACAAIAAgAGQAaQBzAHAAbABhAHkAOgAgAGIAbABvAGMAawA7AA0ACgAgACAAIAAg +AH0ADQAKAA0ACgAgACAAIAAgACMAZgBvAG8AdABlAHIAIAB7AA0ACgAgACAAIAAgACAAIAAgACAA +cABvAHMAaQB0AGkAbwBuADoAIAByAGUAbABhAHQAaQB2AGUAIAAhAGkAbQBwAG8AcgB0AGEAbgB0 +ADsAIAAvACoAIABDAE8ATgBGAC0AMQA3ADUAMAA2ACAAUABsAGEAYwBlACAAdABoAGUAIABmAG8A +bwB0AGUAcgAgAGEAdAAgAGUAbgBkACAAbwBmACAAdABoAGUAIABjAG8AbgB0AGUAbgB0ACAAKgAv +AA0ACgAgACAAIAAgACAAIAAgACAAbQBhAHIAZwBpAG4AOgAgADAAOwANAAoAIAAgACAAIAAgACAA +IAAgAHAAYQBkAGQAaQBuAGcAOgAgADAAOwANAAoAIAAgACAAIAAgACAAIAAgAGIAYQBjAGsAZwBy +AG8AdQBuAGQAOgAgAG4AbwBuAGUAOwANAAoAIAAgACAAIAAgACAAIAAgAGMAbABlAGEAcgA6ACAA +YgBvAHQAaAA7AA0ACgAgACAAIAAgAH0ADQAKAA0ACgAgACAAIAAgACMAcABvAHcAZQByAGUAZABi +AHkAIAB7AA0ACgAgACAAIAAgACAAIAAgACAAYgBvAHIAZABlAHIALQB0AG8AcAA6ACAAbgBvAG4A +ZQA7AA0ACgAgACAAIAAgACAAIAAgACAAYgBhAGMAawBnAHIAbwB1AG4AZAA6ACAAbgBvAG4AZQA7 +AA0ACgAgACAAIAAgAH0ADQAKAA0ACgAgACAAIAAgACMAcABvAHcAZQByAGUAZABiAHkAIABsAGkA +LgBwAHIAaQBuAHQALQBvAG4AbAB5ACAAewANAAoAIAAgACAAIAAgACAAIAAgAGQAaQBzAHAAbABh +AHkAOgAgAGwAaQBzAHQALQBpAHQAZQBtADsADQAKACAAIAAgACAAIAAgACAAIABmAG8AbgB0AC0A +cwB0AHkAbABlADoAIABpAHQAYQBsAGkAYwA7AA0ACgAgACAAIAAgAH0ADQAKAA0ACgAgACAAIAAg +ACMAcABvAHcAZQByAGUAZABiAHkAIABsAGkALgBuAG8AcAByAGkAbgB0ACAAewANAAoAIAAgACAA +IAAgACAAIAAgAGQAaQBzAHAAbABhAHkAOgAgAG4AbwBuAGUAOwANAAoAIAAgACAAIAB9AA0ACgAN +AAoAIAAgACAAIAAvACoAIABuAG8AIAB3AGkAZAB0AGgAIABjAG8AbgB0AHIAbwBsAHMAIABpAG4A +IABwAHIAaQBuAHQAIAAqAC8ADQAKACAAIAAgACAALgB3AGkAawBpAC0AYwBvAG4AdABlAG4AdAAg +AC4AdABhAGIAbABlAC0AdwByAGEAcAAsAA0ACgAgACAAIAAgAC4AdwBpAGsAaQAtAGMAbwBuAHQA +ZQBuAHQAIABwACwADQAKACAAIAAgACAALgBwAGEAbgBlAGwAIAAuAGMAbwBkAGUAQwBvAG4AdABl +AG4AdAAsAA0ACgAgACAAIAAgAC4AcABhAG4AZQBsACAALgBjAG8AZABlAEMAbwBuAHQAZQBuAHQA +IABwAHIAZQAsAA0ACgAgACAAIAAgAC4AaQBtAGEAZwBlAC0AdwByAGEAcAAgAHsADQAKACAAIAAg +ACAAIAAgACAAIABvAHYAZQByAGYAbABvAHcAOgAgAHYAaQBzAGkAYgBsAGUAIAAhAGkAbQBwAG8A +cgB0AGEAbgB0ADsADQAKACAAIAAgACAAfQANAAoADQAKACAAIAAgACAALwAqACAAVABPAEQATwAg +AC0AIABzAGgAbwB1AGwAZAAgAHQAaABpAHMAIAB3AG8AcgBrAD8AIAAqAC8ADQAKACAAIAAgACAA +IwBjAGgAaQBsAGQAcgBlAG4ALQBzAGUAYwB0AGkAbwBuACwADQAKACAAIAAgACAAIwBjAG8AbQBt +AGUAbgB0AHMALQBzAGUAYwB0AGkAbwBuACAALgBjAG8AbQBtAGUAbgB0ACwADQAKACAAIAAgACAA +IwBjAG8AbQBtAGUAbgB0AHMALQBzAGUAYwB0AGkAbwBuACAALgBjAG8AbQBtAGUAbgB0ACAALgBj +AG8AbQBtAGUAbgB0AC0AYgBvAGQAeQAsAA0ACgAgACAAIAAgACMAYwBvAG0AbQBlAG4AdABzAC0A +cwBlAGMAdABpAG8AbgAgAC4AYwBvAG0AbQBlAG4AdAAgAC4AYwBvAG0AbQBlAG4AdAAtAGMAbwBu +AHQAZQBuAHQALAANAAoAIAAgACAAIAAjAGMAbwBtAG0AZQBuAHQAcwAtAHMAZQBjAHQAaQBvAG4A +IAAuAGMAbwBtAG0AZQBuAHQAIABwACAAewANAAoAIAAgACAAIAAgACAAIAAgAHAAYQBnAGUALQBi +AHIAZQBhAGsALQBpAG4AcwBpAGQAZQA6ACAAYQB2AG8AaQBkADsADQAKACAAIAAgACAAfQANAAoA +DQAKACAAIAAgACAAIwBwAGEAZwBlAC0AYwBoAGkAbABkAHIAZQBuACAAYQAgAHsADQAKACAAIAAg 
+ACAAIAAgACAAIAB0AGUAeAB0AC0AZABlAGMAbwByAGEAdABpAG8AbgA6ACAAbgBvAG4AZQA7AA0A +CgAgACAAIAAgAH0ADQAKAA0ACgAgACAAIAAgAC8AKgAqAA0ACgAgACAAIAAgACAAaABpAGQAZQAg +AHQAdwBpAHgAaQBlAHMADQAKAA0ACgAgACAAIAAgACAAdABoAGUAIABzAHAAZQBjAGkAZgBpAGMA +aQB0AHkAIABoAGUAcgBlACAAaQBzACAAYQAgAGgAYQBjAGsAIABiAGUAYwBhAHUAcwBlACAAcABy +AGkAbgB0ACAAcwB0AHkAbABlAHMADQAKACAAIAAgACAAIABhAHIAZQAgAGcAZQB0AHQAaQBuAGcA +IABsAG8AYQBkAGUAZAAgAGIAZQBmAG8AcgBlACAAdABoAGUAIABiAGEAcwBlACAAcwB0AHkAbABl +AHMALgAgACoALwANAAoAIAAgACAAIAAjAGMAbwBtAG0AZQBuAHQAcwAtAHMAZQBjAHQAaQBvAG4A +LgBwAGEAZwBlAFMAZQBjAHQAaQBvAG4AIAAuAHMAZQBjAHQAaQBvAG4ALQBoAGUAYQBkAGUAcgAs +AA0ACgAgACAAIAAgACMAYwBvAG0AbQBlAG4AdABzAC0AcwBlAGMAdABpAG8AbgAuAHAAYQBnAGUA +UwBlAGMAdABpAG8AbgAgAC4AcwBlAGMAdABpAG8AbgAtAHQAaQB0AGwAZQAsAA0ACgAgACAAIAAg +ACMAYwBoAGkAbABkAHIAZQBuAC0AcwBlAGMAdABpAG8AbgAuAHAAYQBnAGUAUwBlAGMAdABpAG8A +bgAgAC4AcwBlAGMAdABpAG8AbgAtAGgAZQBhAGQAZQByACwADQAKACAAIAAgACAAIwBjAGgAaQBs +AGQAcgBlAG4ALQBzAGUAYwB0AGkAbwBuAC4AcABhAGcAZQBTAGUAYwB0AGkAbwBuACAALgBzAGUA +YwB0AGkAbwBuAC0AdABpAHQAbABlACwADQAKACAAIAAgACAALgBjAGgAaQBsAGQAcgBlAG4ALQBz +AGgAbwB3AC0AaABpAGQAZQAgAHsADQAKACAAIAAgACAAIAAgACAAIABwAGEAZABkAGkAbgBnAC0A +bABlAGYAdAA6ACAAMAA7AA0ACgAgACAAIAAgACAAIAAgACAAbQBhAHIAZwBpAG4ALQBsAGUAZgB0 +ADoAIAAwADsADQAKACAAIAAgACAAfQANAAoADQAKACAAIAAgACAALgBjAGgAaQBsAGQAcgBlAG4A +LQBzAGgAbwB3AC0AaABpAGQAZQAuAGkAYwBvAG4AIAB7AA0ACgAgACAAIAAgACAAIAAgACAAZABp +AHMAcABsAGEAeQA6ACAAbgBvAG4AZQA7AA0ACgAgACAAIAAgAH0ADQAKAA0ACgAgACAAIAAgAC8A +KgAgAHAAZQByAHMAbwBuAGEAbAAgAHMAaQBkAGUAYgBhAHIAIAAqAC8ADQAKACAAIAAgACAALgBo +AGEAcwAtAHAAZQByAHMAbwBuAGEAbAAtAHMAaQBkAGUAYgBhAHIAIAAjAGMAbwBuAHQAZQBuAHQA +IAB7AA0ACgAgACAAIAAgACAAIAAgACAAbQBhAHIAZwBpAG4ALQByAGkAZwBoAHQAOgAgADAAcAB4 +ADsADQAKACAAIAAgACAAfQANAAoADQAKACAAIAAgACAALgBoAGEAcwAtAHAAZQByAHMAbwBuAGEA +bAAtAHMAaQBkAGUAYgBhAHIAIAAjAGMAbwBuAHQAZQBuAHQAIAAuAHAAYQBnAGUAUwBlAGMAdABp +AG8AbgAgAHsADQAKACAAIAAgACAAIAAgACAAIABtAGEAcgBnAGkAbgAtAHIAaQBnAGgAdAA6ACAA +MABwAHgAOwANAAoAIAAgACAAIAB9AA0ACgANAAoAIAAgACAAIAAuAG4AbwAtAHAAcgBpAG4AdAAs +ACAALgBuAG8ALQBwAHIAaQBuAHQAIAAqACAAewANAAoAIAAgACAAIAAgACAAIAAgAGQAaQBzAHAA +bABhAHkAOgAgAG4AbwBuAGUAIAAhAGkAbQBwAG8AcgB0AGEAbgB0ADsADQAKACAAIAAgACAAfQAN +AAoAfQANAAoADQAKACAALwAqACAARgBvAG4AdAAgAEQAZQBmAGkAbgBpAHQAaQBvAG4AcwAgACoA +LwANAAoAIABAAGYAbwBuAHQALQBmAGEAYwBlAA0ACgAJAHsAZgBvAG4AdAAtAGYAYQBtAGkAbAB5 +ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwANAAoACQBwAGEAbgBvAHMAZQAtADEAOgA1ACAAMAAgADAA +IAAwACAAMAAgADAAIAAwACAAMAAgADAAIAAwADsADQAKAAkAbQBzAG8ALQBmAG8AbgB0AC0AYwBo +AGEAcgBzAGUAdAA6ADIAOwANAAoACQBtAHMAbwAtAGcAZQBuAGUAcgBpAGMALQBmAG8AbgB0AC0A +ZgBhAG0AaQBsAHkAOgBhAHUAdABvADsADQAKAAkAbQBzAG8ALQBmAG8AbgB0AC0AcABpAHQAYwBo +ADoAdgBhAHIAaQBhAGIAbABlADsADQAKAAkAbQBzAG8ALQBmAG8AbgB0AC0AcwBpAGcAbgBhAHQA +dQByAGUAOgAwACAAMgA2ADgANAAzADUANAA1ADYAIAAwACAAMAAgAC0AMgAxADQANwA0ADgAMwA2 +ADQAOAAgADAAOwB9AA0ACgBAAGYAbwBuAHQALQBmAGEAYwBlAA0ACgAJAHsAZgBvAG4AdAAtAGYA +YQBtAGkAbAB5ADoAIgBDAGEAbQBiAHIAaQBhACAATQBhAHQAaAAiADsADQAKAAkAcABhAG4AbwBz +AGUALQAxADoAMgAgADQAIAA1ACAAMwAgADUAIAA0ACAANgAgADMAIAAyACAANAA7AA0ACgAJAG0A +cwBvAC0AZgBvAG4AdAAtAGMAaABhAHIAcwBlAHQAOgAwADsADQAKAAkAbQBzAG8ALQBnAGUAbgBl +AHIAaQBjAC0AZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAcgBvAG0AYQBuADsADQAKAAkAbQBzAG8A +LQBmAG8AbgB0AC0AcABpAHQAYwBoADoAdgBhAHIAaQBhAGIAbABlADsADQAKAAkAbQBzAG8ALQBm +AG8AbgB0AC0AcwBpAGcAbgBhAHQAdQByAGUAOgAzACAAMAAgADAAIAAwACAAMQAgADAAOwB9AA0A +CgBAAGYAbwBuAHQALQBmAGEAYwBlAA0ACgAJAHsAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAIgBT +AGUAZwBvAGUAIABVAEkAIgA7AA0ACgAJAHAAYQBuAG8AcwBlAC0AMQA6ADIAIAAxADEAIAA1ACAA 
+[base64-encoded UTF-16LE payload elided: Microsoft Word MSO style-sheet boilerplate from an embedded Word-exported HTML document, comprising p.MsoNormal and h1-h4 style definitions, MsoHyperlink link/visited styles, span.Heading1Char through span.Heading4Char, .MsoChpDefault, the WordSection1 page setup, and repetitive @list l0 through l10 bullet-level definitions using the Symbol, Courier New, and Wingdings fonts]
+ACAAUgBvAG0AYQBuACIAOwB9AA0ACgBAAGwAaQBzAHQAIABsADEAMAA6AGwAZQB2AGUAbAAzAA0A +CgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1 +AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkA +bQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAxADAAOAAuADAAcAB0ADsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwA +ZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAK +AAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0A +CgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABs +AGkAcwB0ACAAbAAxADAAOgBsAGUAdgBlAGwANAANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIA +LQBzAHQAbwBwADoAMQA0ADQALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1 +AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkA +bgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBu +AHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkA +OgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMQAwADoAbABlAHYAZQBs +ADUADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQA +OgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwAN +AAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADEAOAAwAC4AMABwAHQA +OwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBu +ADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQA +OwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0 +ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0A +CgBAAGwAaQBzAHQAIABsADEAMAA6AGwAZQB2AGUAbAA2AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0 +AGEAYgAtAHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwA +LQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0 +AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0A +ZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBp +AGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADAAOgBsAGUA +dgBlAGwANwANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBt +AGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoA +p/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMgA1ADIALgAw +AHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQA +aQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAw +AHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4A +MABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7 +AH0ADQAKAEAAbABpAHMAdAAgAGwAMQAwADoAbABlAHYAZQBsADgADQAKAAkAewBtAHMAbwAtAGwA +ZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJ +AG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIAOAA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2 +AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQA +ZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBz +AGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYA 
+YQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADEAMAA6 +AGwAZQB2AGUAbAA5AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYA +bwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4 +AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADIA +NAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBz +AGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEA +OAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAx +ADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4A +ZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADEADQAKAAkAewBtAHMAbwAtAGwAaQBzAHQALQBp +AGQAOgAxADkAOQAxADcAOQAwADgANAA3ADsADQAKAAkAbQBzAG8ALQBsAGkAcwB0AC0AdABlAG0A +cABsAGEAdABlAC0AaQBkAHMAOgAtADMAOQA4ADAAMQAyADIAMgA7AH0ADQAKAEAAbABpAHMAdAAg +AGwAMQAxADoAbABlAHYAZQBsADEADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIA +ZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBs +AC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8A +cAA6ADMANgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAt +AHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQA +OgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6 +AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcA +ZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADEAOgBsAGUAdgBlAGwAMgANAAoACQB7 +AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwA +ZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoANwAyAC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7 +AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBv +AG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQA +IABsADEAMQA6AGwAZQB2AGUAbAAzAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBi +AGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABv +AHAAOgAxADAAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA +cgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBu +AHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMA +aQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBu +AGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADEAOgBsAGUAdgBlAGwANAANAAoA +CQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBs +AGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0A +cwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQA0ADQALgAwAHAAdAA7AA0ACgAJ +AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUA +ZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJ +AG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoA +CQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABp +AHMAdAAgAGwAMQAxADoAbABlAHYAZQBsADUADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4A +dQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABl +AHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0A +cwB0AG8AcAA6ADEAOAAwAC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBt 
+AGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4A +ZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0 +AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoA +VwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADEAMQA6AGwAZQB2AGUAbAA2 +AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoA +YgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6 +AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7 +AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoA +QABsAGkAcwB0ACAAbAAxADEAOgBsAGUAdgBlAGwANwANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBs +AC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8A +LQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABh +AGIALQBzAHQAbwBwADoAMgA1ADIALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAt +AGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYA +bwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBs +AHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMQAxADoAbABlAHYA +ZQBsADgADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBh +AHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfw +OwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIAOAA4AC4AMABw +AHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkA +bwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABw +AHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAA +cAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9 +AA0ACgBAAGwAaQBzAHQAIABsADEAMQA6AGwAZQB2AGUAbAA5AA0ACgAJAHsAbQBzAG8ALQBsAGUA +dgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBt +AHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwA +LQB0AGEAYgAtAHMAdABvAHAAOgAzADIANAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUA +eAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBp +AC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEA +bQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADIADQAK +AAkAewBtAHMAbwAtAGwAaQBzAHQALQBpAGQAOgAyADAANgAxADIAMAAzADAAOAA3ADsADQAKAAkA +bQBzAG8ALQBsAGkAcwB0AC0AdABlAG0AcABsAGEAdABlAC0AaQBkAHMAOgAxADEAOAA2ADgAOAAw +ADkAMgA4ADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADIAOgBsAGUAdgBlAGwAMQANAAoACQB7AG0A +cwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0 +ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0A +bABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMwA2AC4AMABwAHQAOwANAAoACQBtAHMAbwAt +AGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0A +CgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAt +AGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4A +dAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABs +ADEAMgA6AGwAZQB2AGUAbAAyAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA 
+cgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAt +AHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAA +OgA3ADIALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBw +AG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoA +LQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBl +ADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQA +aQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMQAyADoAbABlAHYAZQBsADMADQAKAAkAewBt +AHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUA +dAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAt +AGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADEAMAA4AC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7 +AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBv +AG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQA +IABsADEAMgA6AGwAZQB2AGUAbAA0AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBi +AGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABv +AHAAOgAxADQANAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA +cgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBu +AHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMA +aQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBu +AGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADIAOgBsAGUAdgBlAGwANQANAAoA +CQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBs +AGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0A +cwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQA4ADAALgAwAHAAdAA7AA0ACgAJ +AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUA +ZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJ +AG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoA +CQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABp +AHMAdAAgAGwAMQAyADoAbABlAHYAZQBsADYADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4A +dQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABl +AHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0A +cwB0AG8AcAA6ADIAMQA2AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBt +AGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4A +ZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0 +AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoA +VwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADEAMgA6AGwAZQB2AGUAbAA3 +AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoA +YgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAyADUAMgAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6 +AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7 +AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoA +QABsAGkAcwB0ACAAbAAxADIAOgBsAGUAdgBlAGwAOAANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBs 
+pmp enhancement - spike, sail and unit test
+
+Contents:
+
+- 1. Spike changes
+- 2. Sail-riscv
+- 3. Unit test
+  - 3.1. Test plan
+    - 3.1.1. CSR access
+    - 3.1.2. Memory access
+      - 3.1.2.1. Non-share mode
+      - 3.1.2.2. Share mode
+    - 3.1.3. Test range
+  - 3.2. Test implementation
+1. Spike changes
+
+Changes have been submitted to https://github.com/joxie/riscv-isa-sim.git
+
+2. Sail-riscv
+
+Changes have been submitted to https://github.com/joxie/sail-riscv/commits/master/
+
+Known issues: see https://github.com/rems-project/sail-riscv/issues
+
+3. Unit test
+3.1. Test plan
+
+The tests can be divided into 2 groups:
+
+- CSR access on pmpaddr, pmpcfg and mseccfg.
+- Memory access via PMP, based on the specified CSR config.
+
+To keep the tests simple, all test cases are performed in the following steps (see the sketch after this list):
+
+- Step 1: set up one of the configurations
+- Step 2: try to access the CSR(s) or memories, recording any errors
+- Step 3: verify the results are as expected
+
+The combinations of configurations and access types will be covered. For step 1, this includes both explicit values (like CSR values) and implicit status (like whether the PMP lock was set). In this way, we avoid creating many separate test sequences.
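+A minimal sketch of this three-step pattern, assuming a bare-metal RV64 M-mode
+environment built with GCC; the type and helper names here are hypothetical
+illustrations, not taken from the actual test sources:
+
+```c
+#include <stdint.h>
+
+/* Hypothetical three-step test skeleton; 'pmp_case', 'apply' and
+ * 'trap_count' are illustrative names only. */
+typedef struct {
+    uint64_t pmpaddr0;          /* explicit CSR value (step 1) */
+    uint8_t  pmpcfg0;           /* packed L/A/X/W/R byte for entry 0 */
+    int      was_locked_before; /* implicit status (step 1) */
+} pmp_case;
+
+static volatile int trap_count; /* bumped by the trap handler (step 2) */
+
+static void apply(const pmp_case *c) {
+    /* Step 1: program the PMP CSRs. */
+    __asm__ volatile("csrw pmpaddr0, %0" :: "r"(c->pmpaddr0));
+    __asm__ volatile("csrw pmpcfg0, %0" :: "r"((uint64_t)c->pmpcfg0));
+}
+
+static int verify(int expected_traps) {
+    /* Step 3: compare recorded errors against the expectation. */
+    return trap_count == expected_traps;
+}
+```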
+3.1.1. CSR access
+
+For CSR access, only M mode is tested.
+
+Test configurations can be combinations of the following:
+
+- PMP locked or not
+- If PMP is not locked, whether it was locked previously
+- mseccfg bits, including rlb, mmwp and mml
+
+Action types can be combinations of the following (a minimal access sketch follows this list):
+
+- Target CSR(s), whether pmpcfg/pmpaddr or mseccfg
+- Target values:
+  - For mseccfg, its rlb, mmwp and mml bits
+  - For pmpaddr, selected valid values
+  - For pmpcfg, R/W/X values and sub-index.
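+As one concrete instance of the lock-related configurations, a locked entry
+should ignore further writes while mseccfg.RLB is clear. A sketch, assuming
+RV64, the Zicsr inline-asm idiom, mseccfg at CSR number 0x747 (per the Smepmp
+proposal), and purely illustrative test values:
+
+```c
+#include <stdint.h>
+
+static inline uint64_t read_pmpcfg0(void) {
+    uint64_t v;
+    __asm__ volatile("csrr %0, pmpcfg0" : "=r"(v));
+    return v;
+}
+
+static inline uint64_t read_mseccfg(void) {
+    uint64_t v;
+    __asm__ volatile("csrr %0, 0x747" : "=r"(v)); /* mseccfg (Smepmp) */
+    return v;
+}
+
+/* Write a locked pmpcfg entry, then check a second write is ignored.
+ * 0x9f = L | A=NAPOT | X/W/R for entry 0; mseccfg bit 2 is RLB. */
+static int locked_entry_ignores_writes(void) {
+    uint64_t locked = 0x9f;
+    if (read_mseccfg() & 0x4)  /* RLB set: locked rules stay editable */
+        return 1;              /* skip: the check below does not apply */
+    __asm__ volatile("csrw pmpcfg0, %0" :: "r"(locked));
+    __asm__ volatile("csrw pmpcfg0, %0" :: "r"(0ULL)); /* should be dropped */
+    return read_pmpcfg0() == locked;
+}
+```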
+3.1.2. Memory access
+
+The memory access tests can be divided into 2 parts:
+
+- non-share mode (RW != 01)
+- share mode access between U mode and M mode.
+
+In both cases, when a load/store raises a pmp exception, the exception handler redirects the PC to resume at the next instruction (see the handler sketch below). For a fetch exception, the test breaks and reports its result immediately.
+
+A special case is the dependency between the L bit and the MML bit: the L bit is set in advance of setting the MML bit; otherwise, once MML is set, M mode is not allowed to access the region, not even from the interrupt handler.
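+A sketch of such a handler, assuming RV64 M mode and GCC's RISC-V interrupt
+attribute; 'report_and_halt' is a hypothetical reporting helper, and reading
+the faulting opcode assumes the handler is still permitted to load from mepc:
+
+```c
+#include <stdint.h>
+
+extern void report_and_halt(uint64_t cause); /* hypothetical helper */
+
+static volatile int pmp_errors; /* read back by the test cases */
+
+/* On a load (cause 5) or store/AMO (cause 7) access fault, record the
+ * error and advance mepc past the faulting instruction so execution
+ * resumes. Other causes (e.g. fetch faults) end the test immediately. */
+void __attribute__((interrupt)) m_trap_handler(void) {
+    uint64_t cause, epc;
+    __asm__ volatile("csrr %0, mcause" : "=r"(cause));
+    if (cause == 5 || cause == 7) {
+        pmp_errors++;
+        __asm__ volatile("csrr %0, mepc" : "=r"(epc));
+        /* Step 2 bytes for a compressed instruction, else 4. */
+        uint16_t parcel = *(volatile uint16_t *)(uintptr_t)epc;
+        epc += ((parcel & 0x3) == 0x3) ? 4 : 2;
+        __asm__ volatile("csrw mepc, %0" :: "r"(epc));
+    } else {
+        report_and_halt(cause);
+    }
+}
+```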
+3.1.2.1. Non-share mode
+
+For non-share mode, test configurations can be combinations of the following (a region-setup sketch follows this list):
+
+- mseccfg bits, including mmwp and mml; here the rlb bit can be optional
+- whether a matching pmp entry is available or not
+- when matched, the related R/W/X/L bits
+
+Related action types can be combinations of the following:
+
+- M mode or U mode
+
+Load/store and fetch are all tested in each test case.
+AGUAbABsAEUAPgBuAGEAZwBhAHQAaQB2AGUAPAAvAHMAcABhAG4APgAgAHQAZQBzAHQAcwAgAGYA +bwByACAAbgBvAG4ALQBzAGgAYQByAGUAIABtAG8AZABlAC4APABvADoAcAA+ADwALwBvADoAcAA+ +ADwALwBzAHAAYQBuAD4APAAvAGwAaQA+AA0ACgA8AC8AdQBsAD4ADQAKAA0ACgA8AHAAPgBSAGUA +bABhAHQAZQBkACAAYQBjAHQAaQBvAG4AIAB0AHkAcABlAHMAIABiAGEAbgAgAGIAZQAgAGMAbwBt +AGIAaQBuAGEAdABpAG8AbgBzACAAbwBmACAAZgBvAGwAbABvAHcAaQBuAGcAOgA8AC8AcAA+AA0A +CgANAAoAPAB1AGwAIAB0AHkAcABlAD0AZABpAHMAYwA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBz +AD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQAeQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4A +LQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0 +AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0ACgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoA +bAAwACAAbABlAHYAZQBsADEAIABsAGYAbwAxADAAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBz +AHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYA +YQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABp +AG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4ATQAgAG0AbwBkAGUAIABvAHIAIABVACAA +bQBvAGQAZQA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4APgA8AC8AbABpAD4ADQAK +ACAAPABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMAdAB5AGwAZQA9ACcA +bQBzAG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUAdABvADsAbQBzAG8ALQBt +AGEAcgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABvADsADQAKACAAIAAgACAA +IABtAHMAbwAtAGwAaQBzAHQAOgBsADAAIABsAGUAdgBlAGwAMQAgAGwAZgBvADEAMAA7AHQAYQBi +AC0AcwB0AG8AcABzADoAbABpAHMAdAAgADMANgAuADAAcAB0ACcAPgA8AHMAcABhAG4AIABzAHQA +eQBsAGUAPQAnAG0AcwBvAC0AZgBhAHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6 +AA0ACgAgACAAIAAgACAAIgBUAGkAbQBlAHMAIABOAGUAdwAgAFIAbwBtAGEAbgAiACcAPgBMAG8A +YQBkAC8AUwB0AG8AcgBlACAAbwByACAAZQB4AGUAYwB1AHQAZQAuADwAbwA6AHAAPgA8AC8AbwA6 +AHAAPgA8AC8AcwBwAGEAbgA+ADwALwBsAGkAPgANAAoAPAAvAHUAbAA+AA0ACgANAAoAPABoADMA +IABpAGQAPQAiAHAAbQBwAGUAbgBoAGEAbgBjAGUAbQBlAG4AdAAtAHMAcABpAGsAZQAsAHMAYQBp +AGwAYQBuAGQAdQBuAGkAdAB0AGUAcwB0AC0AVABlAHMAdAByAGEAbgBnAGUAIgA+ADwAcwBwAGEA +bgAgAGMAbABhAHMAcwA9AG4AaAAtAG4AdQBtAGIAZQByAD4APABzAHAAYQBuAA0ACgBzAHQAeQBs +AGUAPQAnAG0AcwBvAC0AZgBhAHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6ACIA +VABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4AMwAuADEALgAzAC4AIAA8AC8AcwBw +AGEAbgA+ADwALwBzAHAAYQBuAD4APABzAHAAYQBuAA0ACgBzAHQAeQBsAGUAPQAnAG0AcwBvAC0A +ZgBhAHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6ACIAVABpAG0AZQBzACAATgBl +AHcAIABSAG8AbQBhAG4AIgAnAD4AVABlAHMAdAAgAHIAYQBuAGcAZQA8AG8AOgBwAD4APAAvAG8A +OgBwAD4APAAvAHMAcABhAG4APgA8AC8AaAAzAD4ADQAKAA0ACgA8AHAAPgBFAHgAaQBzAHQAZQBk +ACAAZgBlAGEAdAB1AHIAZQBzACAAYQByAGUAIABuAG8AdAAgAHQAZQBzAHQAIAB0AGEAcgBnAGUA +dABzACwAIABpAGYAIAB0AGgAZQB5ACAAYQByAGUAIABpAG4AZABlAHAAZQBuAGQAZQBuAHQAIAB3 +AGkAdABoACYAbgBiAHMAcAA7ADwAcwBwAGEAbgANAAoAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUA +PgBwAG0AcAA8AC8AcwBwAGEAbgA+ACAAZQBuAGgAYQBuAGMAZQBtAGUAbgB0AC4AJgBuAGIAcwBw +ADsAPAAvAHAAPgANAAoADQAKADwAdQBsACAAdAB5AHAAZQA9AHMAcQB1AGEAcgBlAD4ADQAKACAA +PABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMAdAB5AGwAZQA9ACcAbQBz +AG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUAdABvADsAbQBzAG8ALQBtAGEA +cgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABvADsADQAKACAAIAAgACAAIABt +AHMAbwAtAGwAaQBzAHQAOgBsADEAMgAgAGwAZQB2AGUAbAAxACAAbABmAG8AMQAxADsAdABhAGIA +LQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4AMABwAHQAJwA+ADwAcwBwAGEAbgAgAHMAdAB5 +AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0AC0AZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoA +DQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4AZQB3ACAAUgBvAG0AYQBuACIAJwA+AFQAaABl +ACAAZQB4AGkAcwB0AGUAZAAgADwAcwBwAGEAbgAgAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4A 
+cABtAHAAPAAvAHMAcABhAG4APgAgAHAAcgBvAHQAZQBjAHQAaQBvAG4ADQAKACAAIAAgACAAIAB3 +AGkAdABoACAAPABzAHAAYQBuACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUAPgBwAG0AcABjAGYA +ZwA8AC8AcwBwAGEAbgA+ACAAYQBuAGQAIAA8AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBs +AGwARQA+AHAAbQBwAGEAZABkAHIAPAAvAHMAcABhAG4APgANAAoAIAAgACAAIAAgAGkAcwAgAG4A +bwB0ACAAdABlAHMAdABlAGQALgA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4APgA8 +AC8AbABpAD4ADQAKACAAPABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMA +dAB5AGwAZQA9ACcAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUAdABv +ADsAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABvADsA +DQAKACAAIAAgACAAIABtAHMAbwAtAGwAaQBzAHQAOgBsADEAMgAgAGwAZQB2AGUAbAAxACAAbABm +AG8AMQAxADsAdABhAGIALQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4AMABwAHQAJwA+ADwA +cwBwAGEAbgAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0AC0AZgBvAG4AdAAt +AGYAYQBtAGkAbAB5ADoADQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4AZQB3ACAAUgBvAG0A +YQBuACIAJwA+AFQAaABlACAAZQBuAHQAcgBhAG4AYwBlACAAcAByAGkAbwByAGkAdAB5ACAAYwBo +AGUAYwBrACAAaQBzACAAbgBvAHQAIAB0AGUAcwB0AGUAZAAuACAASQBuAHMAdABlAGEAZAAsACAA +YQBzAA0ACgAgACAAIAAgACAAcgBlAGwAYQB0AGUAZAAgAHQAbwAgADwAcwBwAGEAbgAgAGMAbABh +AHMAcwA9AFMAcABlAGwAbABFAD4AbQBzAGUAYwBjAGYAZwA8AC8AcwBwAGEAbgA+ACwAIABiAG8A +dABoACAAbQBhAHQAYwBoAGUAZAAgAGEAbgBkACAAbgBvAG4ALQBtAGEAdABjAGgAZQBkAA0ACgAg +ACAAIAAgACAAYQByAGUAIAB0AGUAcwB0AGUAZAAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8AC8A +cwBwAGEAbgA+ADwALwBsAGkAPgANAAoAIAA8AGwAaQAgAGMAbABhAHMAcwA9AE0AcwBvAE4AbwBy +AG0AYQBsACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AdABvAHAALQBhAGwA +dAA6AGEAdQB0AG8AOwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AYgBvAHQAdABvAG0ALQBhAGwAdAA6 +AGEAdQB0AG8AOwANAAoAIAAgACAAIAAgAG0AcwBvAC0AbABpAHMAdAA6AGwAMQAyACAAbABlAHYA +ZQBsADEAIABsAGYAbwAxADEAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAw +AHAAdAAnAD4APABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQA +LQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBl +AHcAIABSAG8AbQBhAG4AIgAnAD4ATwBuAGwAeQAgAE0AIABtAG8AZABlACAAYQBuAGQAIABVACAA +bQBvAGQAZQAgAGEAcgBlACAAdABlAHMAdABlAGQALgAgAEYAbwByACAAPABzAHAAYQBuAA0ACgAg +ACAAIAAgACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUAPgBwAG0AcAA8AC8AcwBwAGEAbgA+ACwA +IABpAHQAIABjAGEAcgBlAHMAIABtAG8AcgBlACAAbwBuACAAdwBoAGUAdABoAGUAcgAgAE0AIABt +AG8AZABlACAAbwByACAAbgBvAHQALgA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4A +PgA8AC8AbABpAD4ADQAKACAAPABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAg +AHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUA +dABvADsAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABv +ADsADQAKACAAIAAgACAAIABtAHMAbwAtAGwAaQBzAHQAOgBsADEAMgAgAGwAZQB2AGUAbAAxACAA +bABmAG8AMQAxADsAdABhAGIALQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4AMABwAHQAJwA+ +ADwAcwBwAGEAbgAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0AC0AZgBvAG4A +dAAtAGYAYQBtAGkAbAB5ADoADQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4AZQB3ACAAUgBv +AG0AYQBuACIAJwA+AEYAbwByACAAQwBTAFIAIABhAGMAYwBlAHMAcwAgAG8AbgAgADwAcwBwAGEA +bgAgAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4AcABtAHAAYQBkAGQAcgA8AC8AcwBwAGEAbgA+ +ACAAYQBuAGQADQAKACAAIAAgACAAIAA8AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwA +RQA+AHAAbQBwAGMAZgBnADwALwBzAHAAYQBuAD4ALAAgAHQAZQBzAHQAIABmAG8AYwB1AHMAIABv +AG4AIABSAC8AVwAvAFgAIABiAGkAdABzACAAcwBpAG4AYwBlACAAdABoAGUAcgBlACAAaQBzAA0A +CgAgACAAIAAgACAAYQAgAG4AZQB3ACAAbQBvAGQAZQAgAFIAVwA9ADAAMQAuACAAUAByAG8AdABl +AGMAdABpAG8AbgAgAHcAaQB0AGgAIABMACAAYgBpAHQAIABpAHMAIABuAG8AdAAgADwAcwBwAGEA +bgAgAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4AYwBvAHYAZQByAHIAZQBkADwALwBzAHAAYQBu 
+AD4ALgA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4APgA8AC8AbABpAD4ADQAKADwA +LwB1AGwAPgANAAoADQAKADwAaAAyACAAaQBkAD0AIgBwAG0AcABlAG4AaABhAG4AYwBlAG0AZQBu +AHQALQBzAHAAaQBrAGUALABzAGEAaQBsAGEAbgBkAHUAbgBpAHQAdABlAHMAdAAtAFQAZQBzAHQA +aQBtAHAAbABlAG0AZQBuAHQAYQB0AGkAbwBuACIAPgA8AHMAcABhAG4ADQAKAGMAbABhAHMAcwA9 +AG4AaAAtAG4AdQBtAGIAZQByAD4APABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYA +YQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgAiAFQAaQBtAGUAcwAgAE4AZQB3 +ACAAUgBvAG0AYQBuACIAJwA+ADMALgAyAC4AIAA8AC8AcwBwAGEAbgA+ADwALwBzAHAAYQBuAD4A +PABzAHAAYQBuAA0ACgBzAHQAeQBsAGUAPQAnAG0AcwBvAC0AZgBhAHIAZQBhAHMAdAAtAGYAbwBu +AHQALQBmAGEAbQBpAGwAeQA6ACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4A +VABlAHMAdAAgAGkAbQBwAGwAZQBtAGUAbgB0AGEAdABpAG8AbgA8AG8AOgBwAD4APAAvAG8AOgBw +AD4APAAvAHMAcABhAG4APgA8AC8AaAAyAD4ADQAKAA0ACgA8AHAAPgBUAG8AIABnAGUAdAAgAGUA +eABwAGUAYwB0AGUAZAAgAGMAbwBtAGIAaQBuAGEAdABpAG8AbgBzACAAZgBvAHIAIAB0AGUAcwB0 +ACAAYwBvAG4AZgBpAGcAdQByAGEAdABpAG8AbgBzACAAYQBuAGQAIABhAGMAdABpAG8AbgAgAHQA +eQBwAGUAcwAsACAARwBOAFUAIAA8AHMAcABhAG4ADQAKAGMAbABhAHMAcwA9AFMAcABlAGwAbABF +AD4AZwBlAG4AZwBlAG4APAAvAHMAcABhAG4APgAgADEALgA0AC4AMgAgAGkAcwAgAHUAcwBlAGQA +IAB0AG8AIABnAGUAbgBlAHIAYQB0AGUAIAB0AGUAcwB0ACAAYwBhAHMAZQBzAC4APAAvAHAAPgAN +AAoADQAKADwAcAA+AFMAZQBlACYAbgBiAHMAcAA7ADwAYQAgAGgAcgBlAGYAPQAiAGgAdAB0AHAA +cwA6AC8ALwB3AHcAdwAuAGcAbgB1AC4AbwByAGcALwBzAG8AZgB0AHcAYQByAGUALwBnAGUAbgBn +AGUAbgAvACIAPgBoAHQAdABwAHMAOgAvAC8AdwB3AHcALgBnAG4AdQAuAG8AcgBnAC8AcwBvAGYA +dAB3AGEAcgBlAC8AZwBlAG4AZwBlAG4ALwA8AC8AYQA+ACYAbgBiAHMAcAA7AGYAbwByAA0ACgBp +AG4AdAByAG8AZAB1AGMAZQAgAGEAbgBkACAAZQB4AGEAbQBwAGwAZQBzAC4AIABCAGEAcwBpAGMA +YQBsAGwAeQAgAGYAbwByACAAYQBuACAAaQBuAHAAdQB0ACAAdABlAG0AcABsAGEAdABlACAAdwBp +AHQAaAAgAG0AYQBuAHkAIABwAGwAYQBjAGUALQA8AHMAcABhAG4ADQAKAGMAbABhAHMAcwA9AFMA +cABlAGwAbABFAD4AdABhAGsAZQBuAHMAPAAvAHMAcABhAG4APgAgAGwAaQBrAGUAJgBuAGIAcwBw +ADsAQAB4AHgAeABAACAAYQBuAGQAIABAADwAcwBwAGEAbgAgAGMAbABhAHMAcwA9AFMAcABlAGwA +bABFAD4AeQB5AHkAPAAvAHMAcABhAG4APgBAACwADQAKAEcATgBVACAAPABzAHAAYQBuACAAYwBs +AGEAcwBzAD0AUwBwAGUAbABsAEUAPgBnAGUAbgBnAGUAbgA8AC8AcwBwAGEAbgA+ACAAZwBlAG4A +ZQByAGEAdABlAHMAIABhACAAQwArACsAIABjAGwAYQBzAHMAIAAoAGkAbgAgAGEAbgAgAG8AdQB0 +AHAAdQB0ACAALgBoAA0ACgBmAGkAbABlACkAIAB3AGkAdABoACAAcAB1AGIAbABpAGMAIABtAGUA +dABoAG8AZABzACAAdABvACAAYQBzAHMAaQBnAG4AIABhAG4AZAAgAHIAZQBwAGwAYQBjAGUAIAB0 +AGgAZQAgAHYAYQBsAHUAZQAgAG8AZgAgAHAAbABhAGMAZQAtADwAcwBwAGEAbgANAAoAYwBsAGEA +cwBzAD0AUwBwAGUAbABsAEUAPgB0AGEAawBlAG4AcwA8AC8AcwBwAGEAbgA+AC4APAAvAHAAPgAN +AAoADQAKADwAcAA+AFQAaABlACAAdABlAHMAdAAgAGcAZQBuAGUAcgBhAHQAbwByACAAZgBvAHIA +IAA8AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AHAAbQBwADwALwBzAHAAYQBu +AD4AIABlAG4AaABhAG4AYwBlAG0AZQBuAHQAIABpAHMADQAKAGMAbwBuAHMAdAByAHUAYwB0AGUA +ZAAgAGIAeQAgAGYAbwBsAGwAbwB3AGkAbgBnACAAcABhAHIAdABzADoAPAAvAHAAPgANAAoADQAK +ADwAdQBsACAAdAB5AHAAZQA9AHMAcQB1AGEAcgBlAD4ADQAKACAAPABsAGkAIABjAGwAYQBzAHMA +PQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBtAGEAcgBnAGkAbgAt +AHQAbwBwAC0AYQBsAHQAOgBhAHUAdABvADsAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAGIAbwB0AHQA +bwBtAC0AYQBsAHQAOgBhAHUAdABvADsADQAKACAAIAAgACAAIABtAHMAbwAtAGwAaQBzAHQAOgBs +ADIAIABsAGUAdgBlAGwAMQAgAGwAZgBvADEAMgA7AHQAYQBiAC0AcwB0AG8AcABzADoAbABpAHMA +dAAgADMANgAuADAAcAB0ACcAPgA8AHMAcABhAG4AIABzAHQAeQBsAGUAPQAnAG0AcwBvAC0AZgBh +AHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AA0ACgAgACAAIAAgACAAIgBUAGkA +bQBlAHMAIABOAGUAdwAgAFIAbwBtAGEAbgAiACcAPgBJAG4AZgByAGEAIABmAGkAbABlAHMALAAg +AGkAbgBjAGwAdQBkAGkAbgBnACAAcwBpAG0AcABsAGkAZgBpAGUAZAAgAEMAUgBUACwAIAA8AHMA 
+cABhAG4ADQAKACAAIAAgACAAIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AHMAeQBzAGMAYQBs +AGwAPAAvAHMAcABhAG4APgAsACAAPABzAHAAYQBuACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUA +PgBsAGkAbgBrAHMAYwByAGkAcAB0AHMAPAAvAHMAcABhAG4APgAsACAAYQBuAGQAIAA8AHMAcABh +AG4ADQAKACAAIAAgACAAIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AE0AYQBrAGUAZgBpAGwA +ZQA8AC8AcwBwAGEAbgA+AC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4APAAv +AGwAaQA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQA +eQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7 +AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0A +CgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAyACAAbABlAHYAZQBsADEAIABsAGYAbwAx +ADIAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAA +YQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBh +AG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4A +IgAnAD4AMwAgAGkAbgBwAHUAdAAgAHQAZQBtAHAAbABhAHQAZQAgAGYAaQBsAGUAcwAgACgALgA8 +AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AGMAYwBfAHMAawBlAGwAPAAvAHMA +cABhAG4APgApAA0ACgAgACAAIAAgACAAZgBvAHIAIABDAFMAUgAsACAAbgBvAG4ALQBzAGgAYQBy +AGUAIABtAG8AZABlACAAbQBlAG0AbwByAHkAIABhAGMAYwBlAHMAcwAgAGEAbgBkACAAcwBoAGEA +cgBlACAAbQBvAGQAZQAgAG0AZQBtAG8AcgB5ACAAYQBjAGMAZQBzAHMADQAKACAAIAAgACAAIABz +AGUAcABhAHIAYQB0AGUAbAB5AC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4A +PAAvAGwAaQA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABz +AHQAeQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQA +bwA7AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7 +AA0ACgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAyACAAbABlAHYAZQBsADEAIABsAGYA +bwAxADIAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABz +AHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0A +ZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBh +AG4AIgAnAD4AMQAgAGQAcgBpAHYAZQByACAAZgBpAGwAZQBzACAAdABvACAAdABhAGsAZQAgAHUA +cwBlACAAbwBmACAAZwBlAG4AZQByAGEAdABlAGQAIABDACsAKwAgAGMAbABhAHMAcwANAAoAIAAg +ACAAIAAgAGYAaQBsAGUAcwAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8AC8AcwBwAGEAbgA+ADwA +LwBsAGkAPgANAAoAPAAvAHUAbAA+AA0ACgANAAoAPABwAD4AVwBpAHQAaAAgAHQAaABlAHMAZQAg +AGYAaQBsAGUAcwAsACAAdABoAGUAIABzAHQAZQBwAHMAIAB0AG8AIAByAHUAbgAgAHQAZQBzAHQA +IABjAGEAcwBlAHMAIABhAHIAZQAgAGEAcwAgAGYAbwBsAGwAbwB3AGkAbgBnACAAKAA8AHMAcABh +AG4ADQAKAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4ATQBhAGsAZQBmAGkAbABlADwALwBzAHAA +YQBuAD4AIAB0AGEAcwBrAHMAKQA6ADwALwBwAD4ADQAKAA0ACgA8AHUAbAAgAHQAeQBwAGUAPQBz +AHEAdQBhAHIAZQA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwA +IABzAHQAeQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1 +AHQAbwA7AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQA +bwA7AA0ACgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAzACAAbABlAHYAZQBsADEAIABs +AGYAbwAxADMAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4A +PABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0 +AC0AZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8A +bQBhAG4AIgAnAD4AVABvACAAaQBuAHYAbwBrAGUAIABHAE4AVQAgADwAcwBwAGEAbgAgAGMAbABh +AHMAcwA9AFMAcABlAGwAbABFAD4AZwBlAG4AZwBlAG4APAAvAHMAcABhAG4APgAgAHQAbwANAAoA +IAAgACAAIAAgAGcAZQBuAGUAcgBhAHQAZQAgAEMAKwArACAAYwBsAGEAcwBzACAAZgBpAGwAZQBz +AC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4APAAvAGwAaQA+AA0ACgAgADwA +bABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQAeQBsAGUAPQAnAG0AcwBv 
+AC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7AG0AcwBvAC0AbQBhAHIA +ZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0ACgAgACAAIAAgACAAbQBz +AG8ALQBsAGkAcwB0ADoAbAAzACAAbABlAHYAZQBsADEAIABsAGYAbwAxADMAOwB0AGEAYgAtAHMA +dABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAAYQBuACAAcwB0AHkAbABl +AD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgANAAoA +IAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4AVABvACAAYwBv +AG0AcABpAGwAZQAgAGQAcgBpAHYAZQByACAAZgBpAGwAZQAgAHQAbwBnAGUAdABoAGUAcgAgAHcA +aQB0AGgAIABnAGUAbgBlAHIAYQB0AGUAZAAgAEMAKwArAA0ACgAgACAAIAAgACAAYwBsAGEAcwBz +ACAAZgBpAGwAZQBzACwAIAB0AG8AIABnAGUAdAAgAHQAaABlACAAdABlAHMAdAAgAGcAZQBuAGUA +cgBhAHQAbwByAC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4APAAvAGwAaQA+ +AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQAeQBsAGUA +PQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7AG0AcwBv +AC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0ACgAgACAA +IAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAzACAAbABlAHYAZQBsADEAIABsAGYAbwAxADMAOwB0 +AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAAYQBuACAA +cwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBs +AHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4A +VABvACAAcgB1AG4AIAB0AGgAZQAgAHQAZQBzAHQAIABnAGUAbgBlAHIAYQB0AG8AcgAuACAASAB1 +AG4AZAByAGUAZABzACAAbwBmACAAdABlAHMAdAAgAGMAYQBzAGUAcwAgAHcAaQBsAGwADQAKACAA +IAAgACAAIABiAGUAIABnAGUAbgBlAHIAYQB0AGUAZAAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8 +AC8AcwBwAGEAbgA+ADwALwBsAGkAPgANAAoAIAA8AGwAaQAgAGMAbABhAHMAcwA9AE0AcwBvAE4A +bwByAG0AYQBsACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AdABvAHAALQBh +AGwAdAA6AGEAdQB0AG8AOwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AYgBvAHQAdABvAG0ALQBhAGwA +dAA6AGEAdQB0AG8AOwANAAoAIAAgACAAIAAgAG0AcwBvAC0AbABpAHMAdAA6AGwAMwAgAGwAZQB2 +AGUAbAAxACAAbABmAG8AMQAzADsAdABhAGIALQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4A +MABwAHQAJwA+ADwAcwBwAGEAbgAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0 +AC0AZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoADQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4A +ZQB3ACAAUgBvAG0AYQBuACIAJwA+AFQAbwAgAGMAbwBtAHAAaQBsAGUAIABhAG4AZAAgAHIAdQBu +ACAAdABlAHMAdAAgAGMAYQBzAGUAcwAgACgAdwBpAHQAaAAgAEMAUgBUACwAIAA8AHMAcABhAG4A +DQAKACAAIAAgACAAIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AHMAeQBzAGMAYQBsAGwAPAAv +AHMAcABhAG4APgAsACAAPABzAHAAYQBuACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUAPgBsAGkA +bgBrAHMAYwBpAHAAdABzADwALwBzAHAAYQBuAD4AKQAgAG8AbgBlACAAYgB5AA0ACgAgACAAIAAg +ACAAbwBuAGUAIABvAG4AIABiAG8AdABoACAAcwBwAGkAawBlACAAYQBuAGQAIABzAGEAaQBsACAA +QwAtAGUAbQB1AGwAYQB0AG8AcgAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8AC8AcwBwAGEAbgA+ +ADwALwBsAGkAPgANAAoAPAAvAHUAbAA+AA0ACgANAAoAPABwAD4AQwB1AHIAcgBlAG4AdABsAHkA +IAB0AGgAZQByAGUAIABhAHIAZQAgADEAMgA4ACAAdABlAHMAdABzACAAZgBvAHIAIABDAFMAUgAg +AGEAYwBjAGUAcwBzACwAIAA1ADIAOAAgAHQAZQBzAHQAcwAgAGYAbwByACAAbgBvAG4ALQBzAGgA +YQByAGUAIABtAG8AZABlAA0ACgBhAGMAYwBlAHMAcwAgAGEAbgBkACAAMgA0ACAAdABlAHMAdABz +ACAAZgBvAHIAIABzAGgAYQByAGUAIABtAG8AZABlACAAYQBjAGMAZQBzAHMALgA8AC8AcAA+AA0A +CgANAAoAPABwAD4APABvADoAcAA+ACYAbgBiAHMAcAA7ADwALwBvADoAcAA+ADwALwBwAD4ADQAK +AA0ACgA8AC8AZABpAHYAPgANAAoADQAKADwALwBiAG8AZAB5AD4ADQAKAA0ACgA8AC8AaAB0AG0A +bAA+AA0ACgA= + +------=_NextPart_01D7437B.526C0BD0 +Content-Location: file:///C:/2AEBA2D4/pmp+enhancement+-+spike,+sail+and+unit+test_files/themedata.thmx +Content-Transfer-Encoding: base64 +Content-Type: application/vnd.ms-officetheme + +UEsDBBQABgAIAAAAIQDp3g+//wAAABwCAAATAAAAW0NvbnRlbnRfVHlwZXNdLnhtbKyRy07DMBBF 
+90j8g+UtSpyyQAgl6YLHjseifMDImSQWydiyp1X790zSVEKoIBZsLNkz954743K9Hwe1w5icp0qv +8kIrJOsbR12l3zdP2a1WiYEaGDxhpQ+Y9Lq+vCg3h4BJiZpSpXvmcGdMsj2OkHIfkKTS+jgCyzV2 +JoD9gA7NdVHcGOuJkTjjyUPX5QO2sB1YPe7l+Zgk4pC0uj82TqxKQwiDs8CS1Oyo+UbJFkIuyrkn +9S6kK4mhzVnCVPkZsOheZTXRNajeIPILjBLDsAyJX89nIBkt5r87nons29ZZbLzdjrKOfDZezE7B +/xRg9T/oE9PMf1t/AgAA//8DAFBLAwQUAAYACAAAACEApdan58AAAAA2AQAACwAAAF9yZWxzLy5y +ZWxzhI/PasMwDIfvhb2D0X1R0sMYJXYvpZBDL6N9AOEof2giG9sb69tPxwYKuwiEpO/3qT3+rov5 +4ZTnIBaaqgbD4kM/y2jhdj2/f4LJhaSnJQhbeHCGo3vbtV+8UNGjPM0xG6VItjCVEg+I2U+8Uq5C +ZNHJENJKRds0YiR/p5FxX9cfmJ4Z4DZM0/UWUtc3YK6PqMn/s8MwzJ5PwX+vLOVFBG43lExp5GKh +qC/jU72QqGWq1B7Qtbj51v0BAAD//wMAUEsDBBQABgAIAAAAIQBreZYWgwAAAIoAAAAcAAAAdGhl +bWUvdGhlbWUvdGhlbWVNYW5hZ2VyLnhtbAzMTQrDIBBA4X2hd5DZN2O7KEVissuuu/YAQ5waQceg +0p/b1+XjgzfO3xTVm0sNWSycBw2KZc0uiLfwfCynG6jaSBzFLGzhxxXm6XgYybSNE99JyHNRfSPV +kIWttd0g1rUr1SHvLN1euSRqPYtHV+jT9yniResrJgoCOP0BAAD//wMAUEsDBBQABgAIAAAAIQC2 +9GeYkwcAAMkgAAAWAAAAdGhlbWUvdGhlbWUvdGhlbWUxLnhtbOxZzYsbyRW/B/I/NH2X9dWtj8Hy +ok/P2jO2sWSHPdZIpe7yVHeJqtKMxWII3lMugcAm5JCFve0hhCzswi655I8x2CSbPyKvqlvdVVLJ +nhkcMGFGMHSXfu/Vr9579d5T1d3PXibUu8BcEJb2/Pqdmu/hdM4WJI16/rPZpNLxPSFRukCUpbjn +b7DwP7v361/dRUcyxgn2QD4VR6jnx1KujqpVMYdhJO6wFU7huyXjCZLwyqPqgqNL0JvQaqNWa1UT +RFLfS1ECah8vl2SOvZlS6d/bKh9TeE2lUANzyqdKNbYkNHZxXlcIsRFDyr0LRHs+zLNglzP8Uvoe +RULCFz2/pv/86r27VXSUC1F5QNaQm+i/XC4XWJw39Jw8OismDYIwaPUL/RpA5T5u3B63xq1Cnwag ++RxWmnGxdbYbwyDHGqDs0aF71B416xbe0N/c49wP1cfCa1CmP9jDTyZDsKKF16AMH+7hw0F3MLL1 +a1CGb+3h27X+KGhb+jUopiQ930PXwlZzuF1tAVkyeuyEd8Ng0m7kyksUREMRXWqKJUvloVhL0AvG +JwBQQIokST25WeElmkMUDxElZ5x4JySKIfBWKGUChmuN2qTWhP/qE+gn7VF0hJEhrXgBE7E3pPh4 +Ys7JSvb8B6DVNyBvf/75zesf37z+6c1XX715/fd8bq3KkjtGaWTK/fLdH/7zzW+9f//w7S9f/zGb +ehcvTPy7v/3u3T/++T71sOLSFG//9P27H79/++ff/+uvXzu09zk6M+EzkmDhPcKX3lOWwAId/PEZ +v57ELEbElOinkUApUrM49I9lbKEfbRBFDtwA23Z8ziHVuID31y8swtOYryVxaHwYJxbwlDE6YNxp +hYdqLsPMs3UauSfnaxP3FKEL19xDlFpeHq9XkGOJS+UwxhbNJxSlEkU4xdJT37FzjB2r+4IQy66n +ZM6ZYEvpfUG8ASJOk8zImRVNpdAxScAvGxdB8Ldlm9Pn3oBR16pH+MJGwt5A1EF+hqllxvtoLVHi +UjlDCTUNfoJk7CI53fC5iRsLCZ6OMGXeeIGFcMk85rBew+kPIc243X5KN4mN5JKcu3SeIMZM5Iid +D2OUrFzYKUljE/u5OIcQRd4TJl3wU2bvEPUOfkDpQXc/J9hy94ezwTPIsCalMkDUN2vu8OV9zKz4 +nW7oEmFXqunzxEqxfU6c0TFYR1Zon2BM0SVaYOw9+9zBYMBWls1L0g9iyCrH2BVYD5Adq+o9xQJ6 +JdXc7OfJEyKskJ3iiB3gc7rZSTwblCaIH9L8CLxu2nwMpS5xBcBjOj83gY8I9IAQL06jPBagwwju +g1qfxMgqYOpduON1wy3/XWWPwb58YdG4wr4EGXxtGUjspsx7bTND1JqgDJgZgi7DlW5BxHJ/KaKK +qxZbO+WW9qYt3QDdkdX0JCT9YAe00/uE/7veBzqMt3/5xrEPPk6/41ZsJatrdjqHksnxTn9zCLfb +1QwZX5BPv6kZoXX6BEMd2c9Ytz3NbU/j/9/3NIf2820nc6jfuO1kfOgwbjuZ/HDl43QyZfMCfY06 +8MgOevSxT3Lw1GdJKJ3KDcUnQh/8CPg9s5jAoJLTJ564OAVcxfCoyhxMYOEijrSMx5n8DZHxNEYr +OB2q+0pJJHLVkfBWTMChkR526lZ4uk5O2SI77KzX1cFmVlkFkuV4LSzG4aBKZuhWuzzAK9RrtpE+ +aN0SULLXIWFMZpNoOki0t4PKSPpYF4zmIKFX9lFYdB0sOkr91lV7LIBa4RX4we3Bz/SeHwYgAkJw +HgfN+UL5KXP11rvamR/T04eMaUUANNjbCCg93VVcDy5PrS4LtSt42iJhhJtNQltGN3gihp/BeXSq +0avQuK6vu6VLLXrKFHo+CK2SRrvzPhY39TXI7eYGmpqZgqbeZc9vNUMImTla9fwlHBrDY7KC2BHq +NxeiEdy8zCXPNvxNMsuKCzlCIs4MrpNOlg0SIjH3KEl6vlp+4Qaa6hyiudUbkBA+WXJdSCufGjlw +uu1kvFziuTTdbowoS2evkOGzXOH8VovfHKwk2RrcPY0Xl94ZXfOnCEIsbNeVARdEwN1BPbPmgsBl +WJHIyvjbKUx52jVvo3QMZeOIrmKUVxQzmWdwncoLOvqtsIHxlq8ZDGqYJC+EZ5EqsKZRrWpaVI2M +w8Gq+2EhZTkjaZY108oqqmq6s5g1w7YM7NjyZkXeYLU1MeQ0s8JnqXs35Xa3uW6nTyiqBBi8sJ+j +6l6hIBjUysksaorxfhpWOTsftWvHdoEfoHaVImFk/dZW7Y7dihrhnA4Gb1T5QW43amFoue0rtaX1 +rbl5sc3OXkDyGEGXu6ZSaFfCyS5H0BBNdU+SpQ3YIi9lvjXgyVtz0vO/rIX9YNgIh5VaJxxXgmZQ +q3TCfrPSD8NmfRzWa6NB4xUUFhkn9TC7sZ/ABQbd5Pf2enzv7j7Z3tHcmbOkyvTdfFUT13f39cbh 
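As a concrete illustration of the trap handling that section 3.1.2 describes, a
test built on the runtime files added below (syscalls.c, util.h) could override
the weak handle_trap() hook so that PMP faults on data accesses are skipped
while any other trap fails the test. This is an editorial sketch, not part of
the patch: it assumes the riscv-tests conventions (crt.S writes handle_trap()'s
return value back to mepc) and the standard mcause codes for access faults.

  #include <stdint.h>
  #include "util.h"                        // insn_len(), added later in this patch

  #define CAUSE_LOAD_ACCESS  5             // standard mcause: load access fault
  #define CAUSE_STORE_ACCESS 7             // standard mcause: store/AMO access fault

  extern void tohost_exit(uintptr_t code); // from syscalls.c below

  // Replaces the weak handle_trap() in syscalls.c: resume after a faulting
  // load/store so the test continues; anything else (e.g. a fetch fault)
  // reports a result immediately, matching the design document.
  uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
  {
    if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS)
      return epc + insn_len(epc);          // skip to the next instruction
    tohost_exit(1337);                     // fail fast on unexpected traps
    return epc;                            // unreachable
  }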
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/syscalls.c b/vendor/riscv-isa-sim/tests/mseccfg/syscalls.c
new file mode 100644
index 00000000..9b526f29
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/syscalls.c
@@ -0,0 +1,485 @@
+// See LICENSE for license details.
+
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <limits.h>
+#include <sys/signal.h>
+#include "util.h"
+
+#define SYS_write 64
+
+#undef strcmp
+
+extern volatile uint64_t tohost;
+extern volatile uint64_t fromhost;
+
+static uintptr_t syscall(uintptr_t which, uint64_t arg0, uint64_t arg1, uint64_t arg2)
+{
+  volatile uint64_t magic_mem[8] __attribute__((aligned(64)));
+  magic_mem[0] = which;
+  magic_mem[1] = arg0;
+  magic_mem[2] = arg1;
+  magic_mem[3] = arg2;
+  __sync_synchronize();
+
+  tohost = (uintptr_t)magic_mem;
+  while (fromhost == 0)
+    ;
+  fromhost = 0;
+
+  __sync_synchronize();
+  return magic_mem[0];
+}
+
+#define NUM_COUNTERS 2
+static uintptr_t counters[NUM_COUNTERS];
+static char* counter_names[NUM_COUNTERS];
+
+void setStats(int enable)
+{
+  int i = 0;
+#define READ_CTR(name) do { \
+    while (i >= NUM_COUNTERS) ; \
+    uintptr_t csr = read_csr(name); \
+    if (!enable) { csr -= counters[i]; counter_names[i] = #name; } \
+    counters[i++] = csr; \
+  } while (0)
+
+  READ_CTR(mcycle);
+  READ_CTR(minstret);
+
+#undef READ_CTR
+}
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code)
+{
+  tohost = (code << 1) | 1;
+  while (1);
+}
+
+uintptr_t __attribute__((weak)) handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+  tohost_exit(1337);
+}
+
+void exit(int code)
+{
+  tohost_exit(code);
+}
+
+void abort()
+{
+  exit(128 + SIGABRT);
+}
+
+void printstr(const char* s)
+{
+  syscall(SYS_write, 1, (uintptr_t)s, strlen(s));
+}
+
+void __attribute__((weak)) thread_entry(int cid, int nc)
+{
+  // multi-threaded programs override this function.
+  // for the case of single-threaded programs, only let core 0 proceed.
+  while (cid != 0);
+}
+
+int __attribute__((weak)) main(int argc, char** argv)
+{
+  // single-threaded programs override this function.
+ printstr("Implement main(), foo!\n"); + return -1; +} + +static void init_tls() +{ + register void* thread_pointer asm("tp"); + extern char _tdata_begin, _tdata_end, _tbss_end; + size_t tdata_size = &_tdata_end - &_tdata_begin; + memcpy(thread_pointer, &_tdata_begin, tdata_size); + size_t tbss_size = &_tbss_end - &_tdata_end; + memset(thread_pointer + tdata_size, 0, tbss_size); +} + +void _init(int cid, int nc) +{ + init_tls(); + thread_entry(cid, nc); + + // only single-threaded programs should ever get here. + int ret = main(0, 0); + + char buf[NUM_COUNTERS * 32] __attribute__((aligned(64))); + char* pbuf = buf; + for (int i = 0; i < NUM_COUNTERS; i++) + if (counters[i]) + pbuf += sprintf(pbuf, "%s = %d\n", counter_names[i], counters[i]); + if (pbuf != buf) + printstr(buf); + + exit(ret); +} + +#undef putchar +int putchar(int ch) +{ + static __thread char buf[64] __attribute__((aligned(64), section(".tls_start"))); +#if DEBUG_FIRST_PUTCHAR + static __thread int buflen = -1; + + if (buflen == -1) { + for (int i=0; i<16; i++) { + buf[i] = ((uint64_t)buf >> (4 * (15-i))) & 0xF; + if (buf[i] < 10) buf[i] += '0'; + else buf[i] += 'A' - 10; + } + + buf[16] = '-'; + buf[17] = ' '; + buflen = 18; + } +#else + static __thread int buflen = 0; +#endif + + buf[buflen++] = ch; + + if (ch == '\n' || buflen == sizeof(buf)) + { + syscall(SYS_write, 1, (uintptr_t)buf, buflen); + buflen = 0; + } + + return 0; +} + +void printhex(uint64_t x) +{ + char str[17]; + int i; + for (i = 0; i < 16; i++) + { + str[15-i] = (x & 0xF) + ((x & 0xF) < 10 ? '0' : 'a'-10); + x >>= 4; + } + str[16] = 0; + + printstr(str); +} + +static inline void printnum(void (*putch)(int, void**), void **putdat, + unsigned long long num, unsigned base, int width, int padc) +{ + unsigned digs[sizeof(num)*CHAR_BIT]; + int pos = 0; + + while (1) + { + digs[pos++] = num % base; + if (num < base) + break; + num /= base; + } + + while (width-- > pos) + putch(padc, putdat); + + while (pos-- > 0) + putch(digs[pos] + (digs[pos] >= 10 ? 
'a' - 10 : '0'), putdat); +} + +static unsigned long long getuint(va_list *ap, int lflag) +{ + if (lflag >= 2) + return va_arg(*ap, unsigned long long); + else if (lflag) + return va_arg(*ap, unsigned long); + else + return va_arg(*ap, unsigned int); +} + +static long long getint(va_list *ap, int lflag) +{ + if (lflag >= 2) + return va_arg(*ap, long long); + else if (lflag) + return va_arg(*ap, long); + else + return va_arg(*ap, int); +} + +static void vprintfmt(void (*putch)(int, void**), void **putdat, const char *fmt, va_list ap) +{ + register const char* p; + const char* last_fmt; + register int ch, err; + unsigned long long num; + int base, lflag, width, precision, altflag; + char padc; + + while (1) { + while ((ch = *(unsigned char *) fmt) != '%') { + if (ch == '\0') + return; + fmt++; + putch(ch, putdat); + } + fmt++; + + // Process a %-escape sequence + last_fmt = fmt; + padc = ' '; + width = -1; + precision = -1; + lflag = 0; + altflag = 0; + reswitch: + switch (ch = *(unsigned char *) fmt++) { + + // flag to pad on the right + case '-': + padc = '-'; + goto reswitch; + + // flag to pad with 0's instead of spaces + case '0': + padc = '0'; + goto reswitch; + + // width field + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + for (precision = 0; ; ++fmt) { + precision = precision * 10 + ch - '0'; + ch = *fmt; + if (ch < '0' || ch > '9') + break; + } + goto process_precision; + + case '*': + precision = va_arg(ap, int); + goto process_precision; + + case '.': + if (width < 0) + width = 0; + goto reswitch; + + case '#': + altflag = 1; + goto reswitch; + + process_precision: + if (width < 0) + width = precision, precision = -1; + goto reswitch; + + // long flag (doubled for long long) + case 'l': + lflag++; + goto reswitch; + + // character + case 'c': + putch(va_arg(ap, int), putdat); + break; + + // string + case 's': + if ((p = va_arg(ap, char *)) == NULL) + p = "(null)"; + if (width > 0 && padc != '-') + for (width -= strnlen(p, precision); width > 0; width--) + putch(padc, putdat); + for (; (ch = *p) != '\0' && (precision < 0 || --precision >= 0); width--) { + putch(ch, putdat); + p++; + } + for (; width > 0; width--) + putch(' ', putdat); + break; + + // (signed) decimal + case 'd': + num = getint(&ap, lflag); + if ((long long) num < 0) { + putch('-', putdat); + num = -(long long) num; + } + base = 10; + goto signed_number; + + // unsigned decimal + case 'u': + base = 10; + goto unsigned_number; + + // (unsigned) octal + case 'o': + // should do something with padding so it's always 3 octits + base = 8; + goto unsigned_number; + + // pointer + case 'p': + static_assert(sizeof(long) == sizeof(void*)); + lflag = 1; + putch('0', putdat); + putch('x', putdat); + /* fall through to 'x' */ + + // (unsigned) hexadecimal + case 'x': + base = 16; + unsigned_number: + num = getuint(&ap, lflag); + signed_number: + printnum(putch, putdat, num, base, width, padc); + break; + + // escaped '%' character + case '%': + putch(ch, putdat); + break; + + // unrecognized escape sequence - just print it literally + default: + putch('%', putdat); + fmt = last_fmt; + break; + } + } +} + +int printf(const char* fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + + vprintfmt((void*)putchar, 0, fmt, ap); + + va_end(ap); + return 0; // incorrect return value, but who cares, anyway? +} + +int sprintf(char* str, const char* fmt, ...) 
+{ + va_list ap; + char* str0 = str; + va_start(ap, fmt); + + void sprintf_putch(int ch, void** data) + { + char** pstr = (char**)data; + **pstr = ch; + (*pstr)++; + } + + vprintfmt(sprintf_putch, (void**)&str, fmt, ap); + *str = 0; + + va_end(ap); + return str - str0; +} + +void* memcpy(void* dest, const void* src, size_t len) +{ + if ((((uintptr_t)dest | (uintptr_t)src | len) & (sizeof(uintptr_t)-1)) == 0) { + const uintptr_t* s = src; + uintptr_t *d = dest; + while (d < (uintptr_t*)(dest + len)) + *d++ = *s++; + } else { + const char* s = src; + char *d = dest; + while (d < (char*)(dest + len)) + *d++ = *s++; + } + return dest; +} + +void* memset(void* dest, int byte, size_t len) +{ + if ((((uintptr_t)dest | len) & (sizeof(uintptr_t)-1)) == 0) { + uintptr_t word = byte & 0xFF; + word |= word << 8; + word |= word << 16; + word |= word << 16 << 16; + + uintptr_t *d = dest; + while (d < (uintptr_t*)(dest + len)) + *d++ = word; + } else { + char *d = dest; + while (d < (char*)(dest + len)) + *d++ = byte; + } + return dest; +} + +size_t strlen(const char *s) +{ + const char *p = s; + while (*p) + p++; + return p - s; +} + +size_t strnlen(const char *s, size_t n) +{ + const char *p = s; + while (n-- && *p) + p++; + return p - s; +} + +int strcmp(const char* s1, const char* s2) +{ + unsigned char c1, c2; + + do { + c1 = *s1++; + c2 = *s2++; + } while (c1 != 0 && c1 == c2); + + return c1 - c2; +} + +char* strcpy(char* dest, const char* src) +{ + char* d = dest; + while ((*d++ = *src++)) + ; + return dest; +} + +long atol(const char* str) +{ + long res = 0; + int sign = 0; + + while (*str == ' ') + str++; + + if (*str == '-' || *str == '+') { + sign = *str == '-'; + str++; + } + + while (*str) { + res *= 10; + res += *str++ - '0'; + } + + return sign ? -res : res; +} diff --git a/vendor/riscv-isa-sim/tests/mseccfg/util.h b/vendor/riscv-isa-sim/tests/mseccfg/util.h new file mode 100644 index 00000000..081cfd63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/util.h @@ -0,0 +1,90 @@ +// See LICENSE for license details. 
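+//
+// Shared helpers for the generated mseccfg tests: verify()/verifyDouble()
+// compare a result array against its reference, barrier() is a
+// sense-reversing barrier for multi-core runs, insn_len() returns the
+// RVC-aware length of the instruction at pc (useful for trap handlers that
+// skip a faulting access), and stats() samples mcycle/minstret through the
+// read_csr() macro from encoding.h.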
+
+#ifndef __UTIL_H
+#define __UTIL_H
+
+extern void setStats(int enable);
+
+#include <stdint.h>
+
+#define static_assert(cond) switch(0) { case 0: case !!(long)(cond): ; }
+
+static int verify(int n, const volatile int* test, const int* verify)
+{
+  int i;
+  // Unrolled for faster verification
+  for (i = 0; i < n/2*2; i+=2)
+  {
+    int t0 = test[i], t1 = test[i+1];
+    int v0 = verify[i], v1 = verify[i+1];
+    if (t0 != v0) return i+1;
+    if (t1 != v1) return i+2;
+  }
+  if (n % 2 != 0 && test[n-1] != verify[n-1])
+    return n;
+  return 0;
+}
+
+static int verifyDouble(int n, const volatile double* test, const double* verify)
+{
+  int i;
+  // Unrolled for faster verification
+  for (i = 0; i < n/2*2; i+=2)
+  {
+    double t0 = test[i], t1 = test[i+1];
+    double v0 = verify[i], v1 = verify[i+1];
+    int eq1 = t0 == v0, eq2 = t1 == v1;
+    if (!(eq1 & eq2)) return i+1+eq1;
+  }
+  if (n % 2 != 0 && test[n-1] != verify[n-1])
+    return n;
+  return 0;
+}
+
+static void __attribute__((noinline)) barrier(int ncores)
+{
+  static volatile int sense;
+  static volatile int count;
+  static __thread int threadsense;
+
+  __sync_synchronize();
+
+  threadsense = !threadsense;
+  if (__sync_fetch_and_add(&count, 1) == ncores-1)
+  {
+    count = 0;
+    sense = threadsense;
+  }
+  else while(sense != threadsense)
+    ;
+
+  __sync_synchronize();
+}
+
+static uint64_t lfsr(uint64_t x)
+{
+  uint64_t bit = (x ^ (x >> 1)) & 1;
+  return (x >> 1) | (bit << 62);
+}
+
+static uintptr_t insn_len(uintptr_t pc)
+{
+  return (*(unsigned short*)pc & 3) ? 4 : 2;
+}
+
+#ifdef __riscv
+#include "encoding.h"
+#endif
+
+#define stringify_1(s) #s
+#define stringify(s) stringify_1(s)
+#define stats(code, iter) do { \
+    unsigned long _c = -read_csr(mcycle), _i = -read_csr(minstret); \
+    code; \
+    _c += read_csr(mcycle), _i += read_csr(minstret); \
+    if (cid == 0) \
+      printf("\n%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
+             stringify(code), _c, _c/iter, 10*_c/iter%10, _c/_i, 10*_c/_i%10); \
+  } while(0)
+
+#endif //__UTIL_H
diff --git a/vendor/riscv-isa-sim/tests/testlib.py b/vendor/riscv-isa-sim/tests/testlib.py
new file mode 100644
index 00000000..d5e8d795
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/testlib.py
@@ -0,0 +1,116 @@
+import os.path
+import pexpect
+import subprocess
+import tempfile
+import testlib
+import unittest
+
+# Note that gdb comes with its own testsuite. I was unable to figure out how to
+# run that testsuite against the spike simulator.
+
+def find_file(path):
+    for directory in (os.getcwd(), os.path.dirname(testlib.__file__)):
+        fullpath = os.path.join(directory, path)
+        if os.path.exists(fullpath):
+            return fullpath
+    return None
+
+def compile(*args):
+    """Compile a single .c file into a binary."""
+    dst = os.path.splitext(args[0])[0]
+    cc = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gcc")
+    cmd = [cc, "-g", "-O", "-o", dst]
+    for arg in args:
+        found = find_file(arg)
+        if found:
+            cmd.append(found)
+        else:
+            cmd.append(arg)
+    cmd = " ".join(cmd)
+    result = os.system(cmd)
+    assert result == 0, "%r failed" % cmd
+    return dst
+
+def unused_port():
+    # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309
+    import socket
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.bind(("",0))
+    port = s.getsockname()[1]
+    s.close()
+    return port
+
+class Spike(object):
+    def __init__(self, binary, halted=False, with_gdb=True, timeout=None):
+        """Launch spike.
Return tuple of its process and the port it's running on.""" + cmd = [] + if timeout: + cmd += ["timeout", str(timeout)] + + cmd += [find_file("spike")] + if halted: + cmd.append('-H') + if with_gdb: + self.port = unused_port() + cmd += ['--gdb-port', str(self.port)] + cmd.append('pk') + if binary: + cmd.append(binary) + logfile = open("spike.log", "w") + self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=logfile, + stderr=logfile) + + def __del__(self): + try: + self.process.kill() + self.process.wait() + except OSError: + pass + + def wait(self, *args, **kwargs): + return self.process.wait(*args, **kwargs) + +class Gdb(object): + def __init__(self): + path = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gdb") + self.child = pexpect.spawn(path) + self.child.logfile = file("gdb.log", "w") + self.wait() + self.command("set width 0") + self.command("set height 0") + # Force consistency. + self.command("set print entry-values no") + + def wait(self): + """Wait for prompt.""" + self.child.expect("\(gdb\)") + + def command(self, command, timeout=-1): + self.child.sendline(command) + self.child.expect("\n", timeout=timeout) + self.child.expect("\(gdb\)", timeout=timeout) + return self.child.before.strip() + + def c(self, wait=True): + if wait: + return self.command("c") + else: + self.child.sendline("c") + self.child.expect("Continuing") + + def interrupt(self): + self.child.send("\003"); + self.child.expect("\(gdb\)") + + def x(self, address, size='w'): + output = self.command("x/%s %s" % (size, address)) + value = int(output.split(':')[1].strip(), 0) + return value + + def p(self, obj): + output = self.command("p %s" % obj) + value = int(output.split('=')[-1].strip()) + return value + + def stepi(self): + return self.command("stepi") diff --git a/vendor/riscv_isa_sim.lock.hjson b/vendor/riscv_isa_sim.lock.hjson index f71b9c61..3e076f28 100644 --- a/vendor/riscv_isa_sim.lock.hjson +++ b/vendor/riscv_isa_sim.lock.hjson @@ -8,7 +8,7 @@ { upstream: { - url: https://github.com/joxie/riscv-isa-sim - rev: c2186bf1731b2a123ccc785ce9585861d370886f + url: https://github.com/Saad525/riscv-isa-sim + rev: 9d44dcb2818c98412f9a264076d568dd5566d7f1 } } diff --git a/vendor/riscv_isa_sim.vendor.hjson b/vendor/riscv_isa_sim.vendor.hjson index 3310cfcd..381a1f24 100644 --- a/vendor/riscv_isa_sim.vendor.hjson +++ b/vendor/riscv_isa_sim.vendor.hjson @@ -2,11 +2,11 @@ // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 { - name: "epmp-tests", - target_dir: "epmp-tests", + name: "riscv-isa-sim", + target_dir: "riscv-isa-sim", upstream: { - url: "https://github.com/joxie/riscv-isa-sim", + url: "https://github.com/Saad525/riscv-isa-sim", rev: "master", }, -} +} \ No newline at end of file
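For reference, a minimal sketch of how the testlib.py helpers above appear
intended to be combined — illustrative only, grounded in the class code itself;
the test source name is hypothetical, and $RISCV must point at a
riscv64-unknown-elf toolchain with spike and pk findable by find_file():

  import testlib

  binary = testlib.compile("rot13.c")          # hypothetical single-file test
  spike = testlib.Spike(binary, halted=True)   # spike -H --gdb-port <port> pk <binary>
  gdb = testlib.Gdb()
  gdb.command("file %s" % binary)
  gdb.command("target remote localhost:%d" % spike.port)
  gdb.command("break main")
  gdb.c()                                      # run until the breakpoint
  gdb.stepi()                                  # single-step one instruction
  print(gdb.command("info registers pc"))      # inspect state through gdb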